diff --git a/.buildinfo b/.buildinfo
index 3fae44737..f83c86a8f 100644
--- a/.buildinfo
+++ b/.buildinfo
@@ -1,4 +1,4 @@
-# Sphinx build info version 1
-# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 62f8ee6a3f6aefe7ce3ae4611d664fc1
-tags: 645f666f9bcd5a90fca523b33c5a78b7
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: ee4940ae2593c690ef5ae868e708b4af
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/.doctrees/api_summary.doctree b/.doctrees/api_summary.doctree
new file mode 100644
index 000000000..801cb9968
Binary files /dev/null and b/.doctrees/api_summary.doctree differ
diff --git a/.doctrees/auto_examples/index.doctree b/.doctrees/auto_examples/index.doctree
new file mode 100644
index 000000000..d957d4757
Binary files /dev/null and b/.doctrees/auto_examples/index.doctree differ
diff --git a/.doctrees/auto_examples/plot_backend.doctree b/.doctrees/auto_examples/plot_backend.doctree
new file mode 100644
index 000000000..3ef8e9d68
Binary files /dev/null and b/.doctrees/auto_examples/plot_backend.doctree differ
diff --git a/.doctrees/auto_examples/plot_benchmark_cdist.doctree b/.doctrees/auto_examples/plot_benchmark_cdist.doctree
new file mode 100644
index 000000000..527b4f973
Binary files /dev/null and b/.doctrees/auto_examples/plot_benchmark_cdist.doctree differ
diff --git a/.doctrees/auto_examples/plot_benchmark_pipeline.doctree b/.doctrees/auto_examples/plot_benchmark_pipeline.doctree
new file mode 100644
index 000000000..4692ec8b4
Binary files /dev/null and b/.doctrees/auto_examples/plot_benchmark_pipeline.doctree differ
diff --git a/.doctrees/auto_examples/plot_black_op.doctree b/.doctrees/auto_examples/plot_black_op.doctree
new file mode 100644
index 000000000..d21071050
Binary files /dev/null and b/.doctrees/auto_examples/plot_black_op.doctree differ
diff --git a/.doctrees/auto_examples/plot_cast_transformer.doctree b/.doctrees/auto_examples/plot_cast_transformer.doctree
new file mode 100644
index 000000000..864d42131
Binary files /dev/null and b/.doctrees/auto_examples/plot_cast_transformer.doctree differ
diff --git a/.doctrees/auto_examples/plot_complex_pipeline.doctree b/.doctrees/auto_examples/plot_complex_pipeline.doctree
new file mode 100644
index 000000000..a4ec5e640
Binary files /dev/null and b/.doctrees/auto_examples/plot_complex_pipeline.doctree differ
diff --git a/.doctrees/auto_examples/plot_convert_decision_function.doctree b/.doctrees/auto_examples/plot_convert_decision_function.doctree
new file mode 100644
index 000000000..8b440435a
Binary files /dev/null and b/.doctrees/auto_examples/plot_convert_decision_function.doctree differ
diff --git a/.doctrees/auto_examples/plot_convert_model.doctree b/.doctrees/auto_examples/plot_convert_model.doctree
new file mode 100644
index 000000000..a0399f945
Binary files /dev/null and b/.doctrees/auto_examples/plot_convert_model.doctree differ
diff --git a/.doctrees/auto_examples/plot_convert_syntax.doctree b/.doctrees/auto_examples/plot_convert_syntax.doctree
new file mode 100644
index 000000000..b56352adb
Binary files /dev/null and b/.doctrees/auto_examples/plot_convert_syntax.doctree differ
diff --git a/.doctrees/auto_examples/plot_convert_zipmap.doctree b/.doctrees/auto_examples/plot_convert_zipmap.doctree
new file mode 100644
index 000000000..4fc6c029b
Binary files /dev/null and b/.doctrees/auto_examples/plot_convert_zipmap.doctree
differ diff --git a/.doctrees/auto_examples/plot_custom_model.doctree b/.doctrees/auto_examples/plot_custom_model.doctree new file mode 100644 index 000000000..a2b61f70e Binary files /dev/null and b/.doctrees/auto_examples/plot_custom_model.doctree differ diff --git a/.doctrees/auto_examples/plot_custom_parser.doctree b/.doctrees/auto_examples/plot_custom_parser.doctree new file mode 100644 index 000000000..1cc12fa0e Binary files /dev/null and b/.doctrees/auto_examples/plot_custom_parser.doctree differ diff --git a/.doctrees/auto_examples/plot_custom_parser_alternative.doctree b/.doctrees/auto_examples/plot_custom_parser_alternative.doctree new file mode 100644 index 000000000..86406101c Binary files /dev/null and b/.doctrees/auto_examples/plot_custom_parser_alternative.doctree differ diff --git a/.doctrees/auto_examples/plot_errors_onnxruntime.doctree b/.doctrees/auto_examples/plot_errors_onnxruntime.doctree new file mode 100644 index 000000000..2573404a5 Binary files /dev/null and b/.doctrees/auto_examples/plot_errors_onnxruntime.doctree differ diff --git a/.doctrees/auto_examples/plot_gpr.doctree b/.doctrees/auto_examples/plot_gpr.doctree new file mode 100644 index 000000000..638117f42 Binary files /dev/null and b/.doctrees/auto_examples/plot_gpr.doctree differ diff --git a/.doctrees/auto_examples/plot_intermediate_outputs.doctree b/.doctrees/auto_examples/plot_intermediate_outputs.doctree new file mode 100644 index 000000000..2eb309aa1 Binary files /dev/null and b/.doctrees/auto_examples/plot_intermediate_outputs.doctree differ diff --git a/.doctrees/auto_examples/plot_investigate_pipeline.doctree b/.doctrees/auto_examples/plot_investigate_pipeline.doctree new file mode 100644 index 000000000..c2704570f Binary files /dev/null and b/.doctrees/auto_examples/plot_investigate_pipeline.doctree differ diff --git a/.doctrees/auto_examples/plot_logging.doctree b/.doctrees/auto_examples/plot_logging.doctree new file mode 100644 index 000000000..ff325ab69 Binary files /dev/null and b/.doctrees/auto_examples/plot_logging.doctree differ diff --git a/.doctrees/auto_examples/plot_metadata.doctree b/.doctrees/auto_examples/plot_metadata.doctree new file mode 100644 index 000000000..6fb141313 Binary files /dev/null and b/.doctrees/auto_examples/plot_metadata.doctree differ diff --git a/.doctrees/auto_examples/plot_nmf.doctree b/.doctrees/auto_examples/plot_nmf.doctree new file mode 100644 index 000000000..17365fe56 Binary files /dev/null and b/.doctrees/auto_examples/plot_nmf.doctree differ diff --git a/.doctrees/auto_examples/plot_onnx_operators.doctree b/.doctrees/auto_examples/plot_onnx_operators.doctree new file mode 100644 index 000000000..68bba8e9e Binary files /dev/null and b/.doctrees/auto_examples/plot_onnx_operators.doctree differ diff --git a/.doctrees/auto_examples/plot_pipeline.doctree b/.doctrees/auto_examples/plot_pipeline.doctree new file mode 100644 index 000000000..01bded867 Binary files /dev/null and b/.doctrees/auto_examples/plot_pipeline.doctree differ diff --git a/.doctrees/auto_examples/plot_pipeline_lightgbm.doctree b/.doctrees/auto_examples/plot_pipeline_lightgbm.doctree new file mode 100644 index 000000000..79cf52909 Binary files /dev/null and b/.doctrees/auto_examples/plot_pipeline_lightgbm.doctree differ diff --git a/.doctrees/auto_examples/plot_pipeline_xgboost.doctree b/.doctrees/auto_examples/plot_pipeline_xgboost.doctree new file mode 100644 index 000000000..0af151925 Binary files /dev/null and b/.doctrees/auto_examples/plot_pipeline_xgboost.doctree differ diff --git 
a/.doctrees/auto_examples/plot_tfidfvectorizer.doctree b/.doctrees/auto_examples/plot_tfidfvectorizer.doctree new file mode 100644 index 000000000..c11fdb049 Binary files /dev/null and b/.doctrees/auto_examples/plot_tfidfvectorizer.doctree differ diff --git a/.doctrees/auto_examples/sg_execution_times.doctree b/.doctrees/auto_examples/sg_execution_times.doctree new file mode 100644 index 000000000..b8718512b Binary files /dev/null and b/.doctrees/auto_examples/sg_execution_times.doctree differ diff --git a/.doctrees/auto_tutorial/index.doctree b/.doctrees/auto_tutorial/index.doctree new file mode 100644 index 000000000..eea5356b8 Binary files /dev/null and b/.doctrees/auto_tutorial/index.doctree differ diff --git a/.doctrees/auto_tutorial/plot_abegin_convert_pipeline.doctree b/.doctrees/auto_tutorial/plot_abegin_convert_pipeline.doctree new file mode 100644 index 000000000..5a4837c53 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_abegin_convert_pipeline.doctree differ diff --git a/.doctrees/auto_tutorial/plot_bbegin_measure_time.doctree b/.doctrees/auto_tutorial/plot_bbegin_measure_time.doctree new file mode 100644 index 000000000..db26be23e Binary files /dev/null and b/.doctrees/auto_tutorial/plot_bbegin_measure_time.doctree differ diff --git a/.doctrees/auto_tutorial/plot_catwoe_transformer.doctree b/.doctrees/auto_tutorial/plot_catwoe_transformer.doctree new file mode 100644 index 000000000..cafe0305c Binary files /dev/null and b/.doctrees/auto_tutorial/plot_catwoe_transformer.doctree differ diff --git a/.doctrees/auto_tutorial/plot_cbegin_opset.doctree b/.doctrees/auto_tutorial/plot_cbegin_opset.doctree new file mode 100644 index 000000000..6d5c513cb Binary files /dev/null and b/.doctrees/auto_tutorial/plot_cbegin_opset.doctree differ diff --git a/.doctrees/auto_tutorial/plot_dbegin_options.doctree b/.doctrees/auto_tutorial/plot_dbegin_options.doctree new file mode 100644 index 000000000..8b8b48f1f Binary files /dev/null and b/.doctrees/auto_tutorial/plot_dbegin_options.doctree differ diff --git a/.doctrees/auto_tutorial/plot_dbegin_options_list.doctree b/.doctrees/auto_tutorial/plot_dbegin_options_list.doctree new file mode 100644 index 000000000..0d2a3ebfd Binary files /dev/null and b/.doctrees/auto_tutorial/plot_dbegin_options_list.doctree differ diff --git a/.doctrees/auto_tutorial/plot_dbegin_options_zipmap.doctree b/.doctrees/auto_tutorial/plot_dbegin_options_zipmap.doctree new file mode 100644 index 000000000..713ffd7a8 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_dbegin_options_zipmap.doctree differ diff --git a/.doctrees/auto_tutorial/plot_ebegin_float_double.doctree b/.doctrees/auto_tutorial/plot_ebegin_float_double.doctree new file mode 100644 index 000000000..e2e57d385 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_ebegin_float_double.doctree differ diff --git a/.doctrees/auto_tutorial/plot_fbegin_investigate.doctree b/.doctrees/auto_tutorial/plot_fbegin_investigate.doctree new file mode 100644 index 000000000..fdffcef68 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_fbegin_investigate.doctree differ diff --git a/.doctrees/auto_tutorial/plot_gbegin_cst.doctree b/.doctrees/auto_tutorial/plot_gbegin_cst.doctree new file mode 100644 index 000000000..85ffb419c Binary files /dev/null and b/.doctrees/auto_tutorial/plot_gbegin_cst.doctree differ diff --git a/.doctrees/auto_tutorial/plot_gbegin_dataframe.doctree b/.doctrees/auto_tutorial/plot_gbegin_dataframe.doctree new file mode 100644 index 000000000..9dc14571c Binary files /dev/null 
and b/.doctrees/auto_tutorial/plot_gbegin_dataframe.doctree differ diff --git a/.doctrees/auto_tutorial/plot_gbegin_transfer_learning.doctree b/.doctrees/auto_tutorial/plot_gbegin_transfer_learning.doctree new file mode 100644 index 000000000..a24910d81 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_gbegin_transfer_learning.doctree differ diff --git a/.doctrees/auto_tutorial/plot_gconverting.doctree b/.doctrees/auto_tutorial/plot_gconverting.doctree new file mode 100644 index 000000000..c6da13ba3 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_gconverting.doctree differ diff --git a/.doctrees/auto_tutorial/plot_gexternal_catboost.doctree b/.doctrees/auto_tutorial/plot_gexternal_catboost.doctree new file mode 100644 index 000000000..ce3ad6492 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_gexternal_catboost.doctree differ diff --git a/.doctrees/auto_tutorial/plot_gexternal_lightgbm.doctree b/.doctrees/auto_tutorial/plot_gexternal_lightgbm.doctree new file mode 100644 index 000000000..b6a49a1ab Binary files /dev/null and b/.doctrees/auto_tutorial/plot_gexternal_lightgbm.doctree differ diff --git a/.doctrees/auto_tutorial/plot_gexternal_lightgbm_reg.doctree b/.doctrees/auto_tutorial/plot_gexternal_lightgbm_reg.doctree new file mode 100644 index 000000000..fc8506538 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_gexternal_lightgbm_reg.doctree differ diff --git a/.doctrees/auto_tutorial/plot_gexternal_xgboost.doctree b/.doctrees/auto_tutorial/plot_gexternal_xgboost.doctree new file mode 100644 index 000000000..0dc9a9fe7 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_gexternal_xgboost.doctree differ diff --git a/.doctrees/auto_tutorial/plot_icustom_converter.doctree b/.doctrees/auto_tutorial/plot_icustom_converter.doctree new file mode 100644 index 000000000..eef349e58 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_icustom_converter.doctree differ diff --git a/.doctrees/auto_tutorial/plot_jcustom_syntax.doctree b/.doctrees/auto_tutorial/plot_jcustom_syntax.doctree new file mode 100644 index 000000000..25cecaa74 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_jcustom_syntax.doctree differ diff --git a/.doctrees/auto_tutorial/plot_kcustom_converter_wrapper.doctree b/.doctrees/auto_tutorial/plot_kcustom_converter_wrapper.doctree new file mode 100644 index 000000000..f51737846 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_kcustom_converter_wrapper.doctree differ diff --git a/.doctrees/auto_tutorial/plot_lcustom_options.doctree b/.doctrees/auto_tutorial/plot_lcustom_options.doctree new file mode 100644 index 000000000..bd480ea79 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_lcustom_options.doctree differ diff --git a/.doctrees/auto_tutorial/plot_mcustom_parser.doctree b/.doctrees/auto_tutorial/plot_mcustom_parser.doctree new file mode 100644 index 000000000..d0a018317 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_mcustom_parser.doctree differ diff --git a/.doctrees/auto_tutorial/plot_ngrams.doctree b/.doctrees/auto_tutorial/plot_ngrams.doctree new file mode 100644 index 000000000..c9d5c071d Binary files /dev/null and b/.doctrees/auto_tutorial/plot_ngrams.doctree differ diff --git a/.doctrees/auto_tutorial/plot_pextend_python_runtime.doctree b/.doctrees/auto_tutorial/plot_pextend_python_runtime.doctree new file mode 100644 index 000000000..675172347 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_pextend_python_runtime.doctree differ diff --git 
a/.doctrees/auto_tutorial/plot_qextend_onnxruntime.doctree b/.doctrees/auto_tutorial/plot_qextend_onnxruntime.doctree new file mode 100644 index 000000000..b708f5a12 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_qextend_onnxruntime.doctree differ diff --git a/.doctrees/auto_tutorial/plot_transformer_discrepancy.doctree b/.doctrees/auto_tutorial/plot_transformer_discrepancy.doctree new file mode 100644 index 000000000..536b583bd Binary files /dev/null and b/.doctrees/auto_tutorial/plot_transformer_discrepancy.doctree differ diff --git a/.doctrees/auto_tutorial/plot_usparse_xgboost.doctree b/.doctrees/auto_tutorial/plot_usparse_xgboost.doctree new file mode 100644 index 000000000..4267bfb9d Binary files /dev/null and b/.doctrees/auto_tutorial/plot_usparse_xgboost.doctree differ diff --git a/.doctrees/auto_tutorial/plot_wext_pyod_forest.doctree b/.doctrees/auto_tutorial/plot_wext_pyod_forest.doctree new file mode 100644 index 000000000..f7dfb4b08 Binary files /dev/null and b/.doctrees/auto_tutorial/plot_wext_pyod_forest.doctree differ diff --git a/.doctrees/auto_tutorial/plot_woe_transformer.doctree b/.doctrees/auto_tutorial/plot_woe_transformer.doctree new file mode 100644 index 000000000..361fa78fb Binary files /dev/null and b/.doctrees/auto_tutorial/plot_woe_transformer.doctree differ diff --git a/.doctrees/auto_tutorial/sg_execution_times.doctree b/.doctrees/auto_tutorial/sg_execution_times.doctree new file mode 100644 index 000000000..a121fb9a8 Binary files /dev/null and b/.doctrees/auto_tutorial/sg_execution_times.doctree differ diff --git a/.doctrees/environment.pickle b/.doctrees/environment.pickle new file mode 100644 index 000000000..fb8bd2302 Binary files /dev/null and b/.doctrees/environment.pickle differ diff --git a/.doctrees/index.doctree b/.doctrees/index.doctree new file mode 100644 index 000000000..d143428e8 Binary files /dev/null and b/.doctrees/index.doctree differ diff --git a/.doctrees/index_tutorial.doctree b/.doctrees/index_tutorial.doctree new file mode 100644 index 000000000..684bbb88b Binary files /dev/null and b/.doctrees/index_tutorial.doctree differ diff --git a/.doctrees/introduction.doctree b/.doctrees/introduction.doctree new file mode 100644 index 000000000..510f04810 Binary files /dev/null and b/.doctrees/introduction.doctree differ diff --git a/.doctrees/parameterized.doctree b/.doctrees/parameterized.doctree new file mode 100644 index 000000000..4119fbf2a Binary files /dev/null and b/.doctrees/parameterized.doctree differ diff --git a/.doctrees/pipeline.doctree b/.doctrees/pipeline.doctree new file mode 100644 index 000000000..2bf933013 Binary files /dev/null and b/.doctrees/pipeline.doctree differ diff --git a/.doctrees/supported.doctree b/.doctrees/supported.doctree new file mode 100644 index 000000000..4d05923f3 Binary files /dev/null and b/.doctrees/supported.doctree differ diff --git a/.doctrees/tutorial_1-5_external.doctree b/.doctrees/tutorial_1-5_external.doctree new file mode 100644 index 000000000..9b42c63ea Binary files /dev/null and b/.doctrees/tutorial_1-5_external.doctree differ diff --git a/.doctrees/tutorial_1_simple.doctree b/.doctrees/tutorial_1_simple.doctree new file mode 100644 index 000000000..abfcd12c7 Binary files /dev/null and b/.doctrees/tutorial_1_simple.doctree differ diff --git a/.doctrees/tutorial_2-5_extlib.doctree b/.doctrees/tutorial_2-5_extlib.doctree new file mode 100644 index 000000000..b4b6202d3 Binary files /dev/null and b/.doctrees/tutorial_2-5_extlib.doctree differ diff --git 
a/.doctrees/tutorial_2_new_converter.doctree b/.doctrees/tutorial_2_new_converter.doctree new file mode 100644 index 000000000..2d6d43282 Binary files /dev/null and b/.doctrees/tutorial_2_new_converter.doctree differ diff --git a/.doctrees/tutorial_3_new_operator.doctree b/.doctrees/tutorial_3_new_operator.doctree new file mode 100644 index 000000000..6027d4e1f Binary files /dev/null and b/.doctrees/tutorial_3_new_operator.doctree differ diff --git a/.doctrees/tutorial_4_advanced.doctree b/.doctrees/tutorial_4_advanced.doctree new file mode 100644 index 000000000..0849e8210 Binary files /dev/null and b/.doctrees/tutorial_4_advanced.doctree differ diff --git a/_downloads/01727087b155e5345657ebbe183f11e3/plot_gbegin_cst.ipynb b/_downloads/01727087b155e5345657ebbe183f11e3/plot_gbegin_cst.ipynb index bab4353d9..959e0d93b 100644 --- a/_downloads/01727087b155e5345657ebbe183f11e3/plot_gbegin_cst.ipynb +++ b/_downloads/01727087b155e5345657ebbe183f11e3/plot_gbegin_cst.ipynb @@ -1,144 +1,144 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Store arrays in one onnx graph\n\nOnce a model is converted it can be useful to store an\narray as a constant in the graph an retrieve it through\nan output. This allows the user to store training parameters\nor other informations like a vocabulary.\nLast sections shows how to remove an output or to promote\nan intermediate result to an output.\n\n## Train and convert a model\n\nWe download one model from the :epkg:`ONNX Zoo` but the model\ncould be trained and produced by another converter library.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import pprint\nimport numpy\nfrom onnx import load\nfrom onnxruntime import InferenceSession\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom skl2onnx import to_onnx\nfrom skl2onnx.helpers.onnx_helper import (\n add_output_initializer, select_model_inputs_outputs)\n\n\ndata = load_iris()\nX, y = data.data.astype(numpy.float32), data.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nmodel = LogisticRegression(penalty='elasticnet', C=2.,\n solver='saga', l1_ratio=0.5)\nmodel.fit(X_train, y_train)\n\nonx = to_onnx(model, X_train[:1], target_opset=12,\n options={'zipmap': False})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Add training parameter\n\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "new_onx = add_output_initializer(\n onx,\n ['C', 'l1_ratio'],\n [numpy.array([model.C]), numpy.array([model.l1_ratio])])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Inference\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = InferenceSession(new_onx.SerializeToString())\nprint(\"output names:\", [o.name for o in sess.get_outputs()])\nres = sess.run(None, {'X': X_test[:2]})\nprint(\"outputs\")\npprint.pprint(res)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The major draw back of this solution is increase the prediction\ntime as 
onnxruntime copies the constants for every prediction.\nIt is possible either to store those constant in a separate ONNX graph\nor to removes them.\n\n## Select outputs\n\nNext function removes unneeded outputs from a model,\nnot only the constants. Next model only keeps the probabilities.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "simple_onx = select_model_inputs_outputs(new_onx, ['probabilities'])\n\nsess = InferenceSession(simple_onx.SerializeToString())\nprint(\"output names:\", [o.name for o in sess.get_outputs()])\nres = sess.run(None, {'X': X_test[:2]})\nprint(\"outputs\")\npprint.pprint(res)\n\n# Function *select_model_inputs_outputs* add also promote an intermediate\n# result to an output.\n#" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This example only uses ONNX graph in memory and never saves or loads a\nmodel. This can be done by using the following snippets of code.\n\n## Save a model\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "with open(\"simplified_model.onnx\", \"wb\") as f:\n f.write(simple_onx.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Load a model\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model = load(\"simplified_model.onnx\", \"wb\")\n\nsess = InferenceSession(model.SerializeToString())\nprint(\"output names:\", [o.name for o in sess.get_outputs()])\nres = sess.run(None, {'X': X_test[:2]})\nprint(\"outputs\")\npprint.pprint(res)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Store arrays in one onnx graph\n\nOnce a model is converted it can be useful to store an\narray as a constant in the graph an retrieve it through\nan output. 
This allows the user to store training parameters\nor other informations like a vocabulary.\nLast sections shows how to remove an output or to promote\nan intermediate result to an output.\n\n## Train and convert a model\n\nWe download one model from the :epkg:`ONNX Zoo` but the model\ncould be trained and produced by another converter library.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import pprint\nimport numpy\nfrom onnx import load\nfrom onnxruntime import InferenceSession\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom skl2onnx import to_onnx\nfrom skl2onnx.helpers.onnx_helper import (\n add_output_initializer, select_model_inputs_outputs)\n\n\ndata = load_iris()\nX, y = data.data.astype(numpy.float32), data.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nmodel = LogisticRegression(penalty='elasticnet', C=2.,\n solver='saga', l1_ratio=0.5)\nmodel.fit(X_train, y_train)\n\nonx = to_onnx(model, X_train[:1], target_opset=12,\n options={'zipmap': False})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Add training parameter\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "new_onx = add_output_initializer(\n onx,\n ['C', 'l1_ratio'],\n [numpy.array([model.C]), numpy.array([model.l1_ratio])])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Inference\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = InferenceSession(new_onx.SerializeToString())\nprint(\"output names:\", [o.name for o in sess.get_outputs()])\nres = sess.run(None, {'X': X_test[:2]})\nprint(\"outputs\")\npprint.pprint(res)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The major draw back of this solution is increase the prediction\ntime as onnxruntime copies the constants for every prediction.\nIt is possible either to store those constant in a separate ONNX graph\nor to removes them.\n\n## Select outputs\n\nNext function removes unneeded outputs from a model,\nnot only the constants. Next model only keeps the probabilities.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "simple_onx = select_model_inputs_outputs(new_onx, ['probabilities'])\n\nsess = InferenceSession(simple_onx.SerializeToString())\nprint(\"output names:\", [o.name for o in sess.get_outputs()])\nres = sess.run(None, {'X': X_test[:2]})\nprint(\"outputs\")\npprint.pprint(res)\n\n# Function *select_model_inputs_outputs* add also promote an intermediate\n# result to an output.\n#" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This example only uses ONNX graph in memory and never saves or loads a\nmodel. 
This can be done by using the following snippets of code.\n\n## Save a model\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "with open(\"simplified_model.onnx\", \"wb\") as f:\n f.write(simple_onx.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load a model\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model = load(\"simplified_model.onnx\", \"wb\")\n\nsess = InferenceSession(model.SerializeToString())\nprint(\"output names:\", [o.name for o in sess.get_outputs()])\nres = sess.run(None, {'X': X_test[:2]})\nprint(\"outputs\")\npprint.pprint(res)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/019b6ce28bee42f2ce8d8b76d294107e/plot_tfidfvectorizer.ipynb b/_downloads/019b6ce28bee42f2ce8d8b76d294107e/plot_tfidfvectorizer.ipynb index 9cc7c0ee0..ae0d65484 100644 --- a/_downloads/019b6ce28bee42f2ce8d8b76d294107e/plot_tfidfvectorizer.ipynb +++ b/_downloads/019b6ce28bee42f2ce8d8b76d294107e/plot_tfidfvectorizer.ipynb @@ -1,187 +1,187 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# TfIdfVectorizer with ONNX\n\nThis example is inspired from the following example:\n`Column Transformer with Heterogeneous Data Sources\n`_\nwhich builds a pipeline to classify text.\n\n## Train a pipeline with TfidfVectorizer\n\nIt replicates the same pipeline taken from *scikit-learn* documentation\nbut reduces it to the part ONNX actually supports without implementing\na custom converter. 
Let's get the data.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport numpy\nimport onnxruntime as rt\nfrom skl2onnx.common.data_types import StringTensorType\nfrom skl2onnx import convert_sklearn\nimport numpy as np\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.datasets import fetch_20newsgroups\ntry:\n from sklearn.datasets._twenty_newsgroups import (\n strip_newsgroup_footer, strip_newsgroup_quoting)\nexcept ImportError:\n # scikit-learn < 0.24\n from sklearn.datasets.twenty_newsgroups import (\n strip_newsgroup_footer, strip_newsgroup_quoting)\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.metrics import classification_report\nfrom sklearn.linear_model import LogisticRegression\n\n\n# limit the list of categories to make running this example faster.\ncategories = ['alt.atheism', 'talk.religion.misc']\ntrain = fetch_20newsgroups(random_state=1,\n subset='train',\n categories=categories,\n )\ntest = fetch_20newsgroups(random_state=1,\n subset='test',\n categories=categories,\n )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The first transform extract two fields from the data.\nWe take it out form the pipeline and assume\nthe data is defined by two text columns.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "class SubjectBodyExtractor(BaseEstimator, TransformerMixin):\n \"\"\"Extract the subject & body from a usenet post in a single pass.\n Takes a sequence of strings and produces a dict of sequences. 
Keys are\n `subject` and `body`.\n \"\"\"\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, posts):\n # construct object dtype array with two columns\n # first column = 'subject' and second column = 'body'\n features = np.empty(shape=(len(posts), 2), dtype=object)\n for i, text in enumerate(posts):\n headers, _, bod = text.partition('\\n\\n')\n bod = strip_newsgroup_footer(bod)\n bod = strip_newsgroup_quoting(bod)\n features[i, 1] = bod\n\n prefix = 'Subject:'\n sub = ''\n for line in headers.split('\\n'):\n if line.startswith(prefix):\n sub = line[len(prefix):]\n break\n features[i, 0] = sub\n\n return features\n\n\ntrain_data = SubjectBodyExtractor().fit_transform(train.data)\ntest_data = SubjectBodyExtractor().fit_transform(test.data)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The pipeline is almost the same except\nwe remove the custom features.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pipeline = Pipeline([\n ('union', ColumnTransformer(\n [\n ('subject', TfidfVectorizer(min_df=50), 0),\n\n ('body_bow', Pipeline([\n ('tfidf', TfidfVectorizer()),\n ('best', TruncatedSVD(n_components=50)),\n ]), 1),\n\n # Removed from the original example as\n # it requires a custom converter.\n # ('body_stats', Pipeline([\n # ('stats', TextStats()), # returns a list of dicts\n # ('vect', DictVectorizer()), # list of dicts -> feature matrix\n # ]), 1),\n ],\n\n transformer_weights={\n 'subject': 0.8,\n 'body_bow': 0.5,\n # 'body_stats': 1.0,\n }\n )),\n\n # Use a LogisticRegression classifier on the combined features.\n # Instead of LinearSVC (not fully ready in onnxruntime).\n ('logreg', LogisticRegression()),\n])\n\npipeline.fit(train_data, train.target)\nprint(classification_report(pipeline.predict(test_data), test.target))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ONNX conversion\n\nIt is difficult to replicate the exact same tokenizer\nbehaviour if the tokeniser comes from space, gensim or nltk.\nThe default one used by *scikit-learn* uses regular expressions\nand is currently being implementing. 
The current implementation\nonly considers a list of separators which can is defined\nin variable *seps*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "seps = {\n TfidfVectorizer: {\n \"separators\": [\n ' ', '.', '\\\\?', ',', ';', ':', '!',\n '\\\\(', '\\\\)', '\\n', '\"', \"'\",\n \"-\", \"\\\\[\", \"\\\\]\", \"@\"\n ]\n }\n}\nmodel_onnx = convert_sklearn(\n pipeline, \"tfidf\",\n initial_types=[(\"input\", StringTensorType([None, 2]))],\n options=seps, target_opset=12)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And save.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "with open(\"pipeline_tfidf.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Predictions with onnxruntime.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = rt.InferenceSession(\"pipeline_tfidf.onnx\")\nprint('---', train_data[0])\ninputs = {'input': train_data[:1]}\npred_onx = sess.run(None, inputs)\nprint(\"predict\", pred_onx[0])\nprint(\"predict_proba\", pred_onx[1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With *scikit-learn*:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(pipeline.predict(train_data[:1]))\nprint(pipeline.predict_proba(train_data[:1]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There are discrepencies for this model because\nthe tokenization is not exactly the same.\nThis is a work in progress.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Display the ONNX graph\n\nFinally, let's see the graph converted with *sklearn-onnx*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name,\n rankdir=\"TB\", node_producer=GetOpNodeProducer(\"docstring\",\n color=\"yellow\",\n fillcolor=\"yellow\",\n style=\"filled\"))\npydot_graph.write_dot(\"pipeline_tfidf.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline_tfidf.dot')\n\nimage = plt.imread(\"pipeline_tfidf.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# TfIdfVectorizer with ONNX\n\nThis example is inspired from the following example:\n[Column Transformer with Heterogeneous Data Sources](https://scikit-learn.org/stable/auto_examples/\ncompose/plot_column_transformer.html)\nwhich builds a 
pipeline to classify text.\n\n## Train a pipeline with TfidfVectorizer\n\nIt replicates the same pipeline taken from *scikit-learn* documentation\nbut reduces it to the part ONNX actually supports without implementing\na custom converter. Let's get the data.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport numpy\nimport onnxruntime as rt\nfrom skl2onnx.common.data_types import StringTensorType\nfrom skl2onnx import convert_sklearn\nimport numpy as np\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.datasets import fetch_20newsgroups\ntry:\n from sklearn.datasets._twenty_newsgroups import (\n strip_newsgroup_footer, strip_newsgroup_quoting)\nexcept ImportError:\n # scikit-learn < 0.24\n from sklearn.datasets.twenty_newsgroups import (\n strip_newsgroup_footer, strip_newsgroup_quoting)\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.metrics import classification_report\nfrom sklearn.linear_model import LogisticRegression\n\n\n# limit the list of categories to make running this example faster.\ncategories = ['alt.atheism', 'talk.religion.misc']\ntrain = fetch_20newsgroups(random_state=1,\n subset='train',\n categories=categories,\n )\ntest = fetch_20newsgroups(random_state=1,\n subset='test',\n categories=categories,\n )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The first transform extract two fields from the data.\nWe take it out form the pipeline and assume\nthe data is defined by two text columns.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "class SubjectBodyExtractor(BaseEstimator, TransformerMixin):\n \"\"\"Extract the subject & body from a usenet post in a single pass.\n Takes a sequence of strings and produces a dict of sequences. 
Keys are\n `subject` and `body`.\n \"\"\"\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, posts):\n # construct object dtype array with two columns\n # first column = 'subject' and second column = 'body'\n features = np.empty(shape=(len(posts), 2), dtype=object)\n for i, text in enumerate(posts):\n headers, _, bod = text.partition('\\n\\n')\n bod = strip_newsgroup_footer(bod)\n bod = strip_newsgroup_quoting(bod)\n features[i, 1] = bod\n\n prefix = 'Subject:'\n sub = ''\n for line in headers.split('\\n'):\n if line.startswith(prefix):\n sub = line[len(prefix):]\n break\n features[i, 0] = sub\n\n return features\n\n\ntrain_data = SubjectBodyExtractor().fit_transform(train.data)\ntest_data = SubjectBodyExtractor().fit_transform(test.data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The pipeline is almost the same except\nwe remove the custom features.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pipeline = Pipeline([\n ('union', ColumnTransformer(\n [\n ('subject', TfidfVectorizer(min_df=50, max_features=500), 0),\n\n ('body_bow', Pipeline([\n ('tfidf', TfidfVectorizer()),\n ('best', TruncatedSVD(n_components=50)),\n ]), 1),\n\n # Removed from the original example as\n # it requires a custom converter.\n # ('body_stats', Pipeline([\n # ('stats', TextStats()), # returns a list of dicts\n # ('vect', DictVectorizer()), # list of dicts -> feature matrix\n # ]), 1),\n ],\n\n transformer_weights={\n 'subject': 0.8,\n 'body_bow': 0.5,\n # 'body_stats': 1.0,\n }\n )),\n\n # Use a LogisticRegression classifier on the combined features.\n # Instead of LinearSVC (not fully ready in onnxruntime).\n ('logreg', LogisticRegression()),\n])\n\npipeline.fit(train_data, train.target)\nprint(classification_report(pipeline.predict(test_data), test.target))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ONNX conversion\n\nIt is difficult to replicate the exact same tokenizer\nbehaviour if the tokeniser comes from space, gensim or nltk.\nThe default one used by *scikit-learn* uses regular expressions\nand is currently being implementing. 
The current implementation\nonly considers a list of separators which can is defined\nin variable *seps*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "seps = {\n TfidfVectorizer: {\n \"separators\": [\n ' ', '.', '\\\\?', ',', ';', ':', '!',\n '\\\\(', '\\\\)', '\\n', '\"', \"'\",\n \"-\", \"\\\\[\", \"\\\\]\", \"@\"\n ]\n }\n}\nmodel_onnx = convert_sklearn(\n pipeline, \"tfidf\",\n initial_types=[(\"input\", StringTensorType([None, 2]))],\n options=seps, target_opset=12)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And save.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "with open(\"pipeline_tfidf.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Predictions with onnxruntime.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = rt.InferenceSession(\"pipeline_tfidf.onnx\")\nprint('---', train_data[0])\ninputs = {'input': train_data[:1]}\npred_onx = sess.run(None, inputs)\nprint(\"predict\", pred_onx[0])\nprint(\"predict_proba\", pred_onx[1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With *scikit-learn*:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(pipeline.predict(train_data[:1]))\nprint(pipeline.predict_proba(train_data[:1]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There are discrepencies for this model because\nthe tokenization is not exactly the same.\nThis is a work in progress.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Display the ONNX graph\n\nFinally, let's see the graph converted with *sklearn-onnx*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name,\n rankdir=\"TB\", node_producer=GetOpNodeProducer(\"docstring\",\n color=\"yellow\",\n fillcolor=\"yellow\",\n style=\"filled\"))\npydot_graph.write_dot(\"pipeline_tfidf.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline_tfidf.dot')\n\nimage = plt.imread(\"pipeline_tfidf.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/02fbcef0453739ff6a0579b817c59a3e/auto_tutorial_python.zip b/_downloads/02fbcef0453739ff6a0579b817c59a3e/auto_tutorial_python.zip index 47fd551d5..1cba952a2 100644 Binary files a/_downloads/02fbcef0453739ff6a0579b817c59a3e/auto_tutorial_python.zip and b/_downloads/02fbcef0453739ff6a0579b817c59a3e/auto_tutorial_python.zip differ diff --git a/_downloads/03289b8dfb67c831212cde166d46b95d/plot_onnx_operators.ipynb 
b/_downloads/03289b8dfb67c831212cde166d46b95d/plot_onnx_operators.ipynb index 5e20e2c5d..72e8efe95 100644 --- a/_downloads/03289b8dfb67c831212cde166d46b95d/plot_onnx_operators.ipynb +++ b/_downloads/03289b8dfb67c831212cde166d46b95d/plot_onnx_operators.ipynb @@ -1,180 +1,180 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Play with ONNX operators\n\nONNX aims at describing most of the machine learning models\nimplemented in *scikit-learn* but it does not necessarily describe\nthe prediction function the same way *scikit-learn* does.\nIf it is possible to define custom operators, it usually\nrequires some time to add it to ONNX specifications and then to\nthe backend used to compute the predictions. It is better to look\nfirst if the existing operators can be used. The list is available\non *github* and gives the `basic operators\n`_\nand others `dedicated to machine learning\n`_.\n*ONNX* has a Python API which can be used to define an *ONNX*\ngraph: `PythonAPIOverview.md\n`_.\nBut it is quite verbose and makes it difficult to describe big graphs.\n*sklearn-onnx* implements a nicer way to test *ONNX* operators.\n\n## ONNX Python API\n\nLet's try the example given by ONNX documentation:\n`ONNX Model Using Helper Functions\n`_.\nIt relies on *protobuf* whose definition can be found\non github `onnx.proto\n`_.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import onnxruntime\nimport numpy\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport onnx\nfrom onnx import helper\nfrom onnx import TensorProto\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\n\n# Create one input (ValueInfoProto)\nX = helper.make_tensor_value_info('X', TensorProto.FLOAT, [None, 2])\n\n# Create one output (ValueInfoProto)\nY = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [None, 4])\n\n# Create a node (NodeProto)\nnode_def = helper.make_node(\n 'Pad', # node name\n ['X'], # inputs\n ['Y'], # outputs\n mode='constant', # attributes\n value=1.5,\n pads=[0, 1, 0, 1],\n)\n\n# Create the graph (GraphProto)\ngraph_def = helper.make_graph(\n [node_def],\n 'test-model',\n [X],\n [Y],\n)\n\n# Create the model (ModelProto)\nmodel_def = helper.make_model(graph_def, producer_name='onnx-example')\nmodel_def.opset_import[0].version = 10\n\nprint('The model is:\\n{}'.format(model_def))\nonnx.checker.check_model(model_def)\nprint('The model is checked!')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Same example with sklearn-onnx\n\nEvery operator has its own class in *sklearn-onnx*.\nThe list is dynamically created based on the installed\nonnx package.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from skl2onnx.algebra.onnx_ops import OnnxPad # noqa\n\npad = OnnxPad('X', output_names=['Y'], mode='constant', value=1.5,\n pads=[0, 1, 0, 1], op_version=10)\nmodel_def = pad.to_onnx({'X': X}, target_opset=10)\n\nprint('The model is:\\n{}'.format(model_def))\nonnx.checker.check_model(model_def)\nprint('The model is checked!')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Inputs and outputs can also be skipped.\n\n" - ] - }, - { - "cell_type": 
"code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pad = OnnxPad(mode='constant', value=1.5,\n pads=[0, 1, 0, 1], op_version=10)\n\nmodel_def = pad.to_onnx({pad.inputs[0].name: X}, target_opset=10)\nonnx.checker.check_model(model_def)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Multiple operators\n\nLet's use the second example from the documentation.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Preprocessing: create a model with two nodes, Y's shape is unknown\nnode1 = helper.make_node('Transpose', ['X'], ['Y'], perm=[1, 0, 2])\nnode2 = helper.make_node('Transpose', ['Y'], ['Z'], perm=[1, 0, 2])\n\ngraph = helper.make_graph(\n [node1, node2],\n 'two-transposes',\n [helper.make_tensor_value_info('X', TensorProto.FLOAT, (2, 3, 4))],\n [helper.make_tensor_value_info('Z', TensorProto.FLOAT, (2, 3, 4))],\n)\n\noriginal_model = helper.make_model(graph, producer_name='onnx-examples')\n\n# Check the model and print Y's shape information\nonnx.checker.check_model(original_model)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Which we translate into:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from skl2onnx.algebra.onnx_ops import OnnxTranspose # noqa\n\nnode = OnnxTranspose(\n OnnxTranspose('X', perm=[1, 0, 2], op_version=12),\n perm=[1, 0, 2], op_version=12)\nX = np.arange(2 * 3 * 4).reshape((2, 3, 4)).astype(np.float32)\n\n# numpy arrays are good enough to define the input shape\nmodel_def = node.to_onnx({'X': X}, target_opset=12)\nonnx.checker.check_model(model_def)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's the output with onnxruntime\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def predict_with_onnxruntime(model_def, *inputs):\n import onnxruntime as ort\n sess = ort.InferenceSession(model_def.SerializeToString())\n names = [i.name for i in sess.get_inputs()]\n dinputs = {name: input for name, input in zip(names, inputs)}\n res = sess.run(None, dinputs)\n names = [o.name for o in sess.get_outputs()]\n return {name: output for name, output in zip(names, res)}\n\n\nY = predict_with_onnxruntime(model_def, X)\nprint(Y)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Display the ONNX graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n model_def.graph, name=model_def.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline_transpose2x.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline_transpose2x.dot')\n\nimage = plt.imread(\"pipeline_transpose2x.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import sklearn # noqa\nprint(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", 
sklearn.__version__)\nimport skl2onnx # noqa\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Play with ONNX operators\n\nONNX aims at describing most of the machine learning models\nimplemented in *scikit-learn* but it does not necessarily describe\nthe prediction function the same way *scikit-learn* does.\nIf it is possible to define custom operators, it usually\nrequires some time to add it to ONNX specifications and then to\nthe backend used to compute the predictions. It is better to look\nfirst if the existing operators can be used. The list is available\non *github* and gives the [basic operators](https://github.com/onnx/onnx/blob/master/docs/Operators.md)\nand others [dedicated to machine learning](https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md).\n*ONNX* has a Python API which can be used to define an *ONNX*\ngraph: [PythonAPIOverview.md](https://github.com/onnx/onnx/blob/master/docs/PythonAPIOverview.md).\nBut it is quite verbose and makes it difficult to describe big graphs.\n*sklearn-onnx* implements a nicer way to test *ONNX* operators.\n\n## ONNX Python API\n\nLet's try the example given by ONNX documentation:\n[ONNX Model Using Helper Functions](https://github.com/onnx/onnx/blob/master/docs/PythonAPIOverview.md\n#creating-an-onnx-model-using-helper-functions).\nIt relies on *protobuf* whose definition can be found\non github [onnx.proto](https://github.com/onnx/onnx/blob/master/onnx/onnx.proto).\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import onnxruntime\nimport numpy\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport onnx\nfrom onnx import helper\nfrom onnx import TensorProto\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\n\n# Create one input (ValueInfoProto)\nX = helper.make_tensor_value_info('X', TensorProto.FLOAT, [None, 2])\n\n# Create one output (ValueInfoProto)\nY = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [None, 4])\n\n# Create a node (NodeProto)\nnode_def = helper.make_node(\n 'Pad', # node name\n ['X'], # inputs\n ['Y'], # outputs\n mode='constant', # attributes\n value=1.5,\n pads=[0, 1, 0, 1],\n)\n\n# Create the graph (GraphProto)\ngraph_def = helper.make_graph(\n [node_def],\n 'test-model',\n [X],\n [Y],\n)\n\n# Create the model (ModelProto)\nmodel_def = helper.make_model(graph_def, producer_name='onnx-example')\nmodel_def.opset_import[0].version = 10\n\nprint('The model is:\\n{}'.format(model_def))\nonnx.checker.check_model(model_def)\nprint('The model is checked!')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Same example with sklearn-onnx\n\nEvery operator has its own class in *sklearn-onnx*.\nThe list is dynamically 
created based on the installed\nonnx package.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from skl2onnx.algebra.onnx_ops import OnnxPad # noqa\n\npad = OnnxPad('X', output_names=['Y'], mode='constant', value=1.5,\n pads=[0, 1, 0, 1], op_version=10)\nmodel_def = pad.to_onnx({'X': X}, target_opset=10)\n\nprint('The model is:\\n{}'.format(model_def))\nonnx.checker.check_model(model_def)\nprint('The model is checked!')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Inputs and outputs can also be skipped.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pad = OnnxPad(mode='constant', value=1.5,\n pads=[0, 1, 0, 1], op_version=10)\n\nmodel_def = pad.to_onnx({pad.inputs[0].name: X}, target_opset=10)\nonnx.checker.check_model(model_def)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Multiple operators\n\nLet's use the second example from the documentation.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Preprocessing: create a model with two nodes, Y's shape is unknown\nnode1 = helper.make_node('Transpose', ['X'], ['Y'], perm=[1, 0, 2])\nnode2 = helper.make_node('Transpose', ['Y'], ['Z'], perm=[1, 0, 2])\n\ngraph = helper.make_graph(\n [node1, node2],\n 'two-transposes',\n [helper.make_tensor_value_info('X', TensorProto.FLOAT, (2, 3, 4))],\n [helper.make_tensor_value_info('Z', TensorProto.FLOAT, (2, 3, 4))],\n)\n\noriginal_model = helper.make_model(graph, producer_name='onnx-examples')\n\n# Check the model and print Y's shape information\nonnx.checker.check_model(original_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Which we translate into:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from skl2onnx.algebra.onnx_ops import OnnxTranspose # noqa\n\nnode = OnnxTranspose(\n OnnxTranspose('X', perm=[1, 0, 2], op_version=12),\n perm=[1, 0, 2], op_version=12)\nX = np.arange(2 * 3 * 4).reshape((2, 3, 4)).astype(np.float32)\n\n# numpy arrays are good enough to define the input shape\nmodel_def = node.to_onnx({'X': X}, target_opset=12)\nonnx.checker.check_model(model_def)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's the output with onnxruntime\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def predict_with_onnxruntime(model_def, *inputs):\n import onnxruntime as ort\n sess = ort.InferenceSession(model_def.SerializeToString())\n names = [i.name for i in sess.get_inputs()]\n dinputs = {name: input for name, input in zip(names, inputs)}\n res = sess.run(None, dinputs)\n names = [o.name for o in sess.get_outputs()]\n return {name: output for name, output in zip(names, res)}\n\n\nY = predict_with_onnxruntime(model_def, X)\nprint(Y)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Display the ONNX graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n model_def.graph, name=model_def.graph.name, rankdir=\"TB\",\n 
node_producer=GetOpNodeProducer(\"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline_transpose2x.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline_transpose2x.dot')\n\nimage = plt.imread(\"pipeline_transpose2x.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import sklearn # noqa\nprint(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nimport skl2onnx # noqa\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/04cffed4ebc9246bef74301fa6f3e2c1/plot_kcustom_converter_wrapper.py b/_downloads/04cffed4ebc9246bef74301fa6f3e2c1/plot_kcustom_converter_wrapper.py index 03ded1b3c..d484c0094 100644 --- a/_downloads/04cffed4ebc9246bef74301fa6f3e2c1/plot_kcustom_converter_wrapper.py +++ b/_downloads/04cffed4ebc9246bef74301fa6f3e2c1/plot_kcustom_converter_wrapper.py @@ -16,9 +16,6 @@ stable/modules/generated/sklearn.decomposition.PCA.html>`_. We could then reuse the converter associated to this model. -.. contents:: - :local: - Custom model ++++++++++++ diff --git a/_downloads/04dd33aeb9eb841ffc4c0ac9d4cfab1e/plot_cbegin_opset.ipynb b/_downloads/04dd33aeb9eb841ffc4c0ac9d4cfab1e/plot_cbegin_opset.ipynb index 28e1dfe5e..e4c96142c 100644 --- a/_downloads/04dd33aeb9eb841ffc4c0ac9d4cfab1e/plot_cbegin_opset.ipynb +++ b/_downloads/04dd33aeb9eb841ffc4c0ac9d4cfab1e/plot_cbegin_opset.ipynb @@ -1,133 +1,133 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# What is the opset number?\n\n.. index:: opset, target opset, version\n\nEvery library is versioned. :epkg:`scikit-learn` may change\nthe implementation of a specific model. That happens\nfor example with the `SVC `_ model where\nthe parameter *break_ties* was added in 0.22. :epkg:`ONNX`\ndoes also have a version called *opset number*.\nOperator *ArgMin* was added in opset 1 and changed in opset\n11, 12, 13. Sometimes, it is updated to extend the list\nof types it supports, sometimes, it moves a parameter\ninto the input list. The runtime used to deploy the model\ndoes not implement a new version, in that case, a model\nmust be converted by usually using the most recent opset\nsupported by the runtime, we call that opset the\n*targeted opset*. 
An ONNX graph only contains\none unique opset, every node must be described following\nthe specifications defined by the latest opset below the\ntargeted opset.\n\nThis example considers an `IsolationForest\n`_ and digs into opsets.\n\n## Data\n\nA simple example.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from onnx.defs import onnx_opset_version\nfrom skl2onnx import to_onnx\nimport numpy\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.datasets import make_blobs\n\nX, y = make_blobs(n_samples=100, n_features=2)\n\nmodel = IsolationForest(n_estimators=3)\nmodel.fit(X)\nlabels = model.predict(X)\n\nfig, ax = plt.subplots(1, 1)\nfor k in (-1, 1):\n ax.plot(X[labels == k, 0], X[labels == k, 1], 'o', label=\"cl%d\" % k)\nax.set_title(\"Sample\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ONNX\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx = to_onnx(model, X[:1].astype(numpy.float32),\n target_opset={'': 15, 'ai.onnx.ml': 2})\nprint(onx)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The last line shows the opsets.\nLet's extract it.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "domains = onx.opset_import\nfor dom in domains:\n print(\"domain: %r, version: %r\" % (dom.domain, dom.version))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There are two opsets, one for standard operators,\nthe other for machine learning operators.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ONNX and opset\n\nThe converter can convert a model to an older opset\nthan the default one, from 1 to the last available one.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def get_domain_opset(onx):\n domains = onx.opset_import\n res = [{'domain': dom.domain, 'version': dom.version}\n for dom in domains]\n return {d['domain']: d['version'] for d in res}\n\n\nfor opset in range(6, onnx_opset_version() + 1):\n try:\n onx = to_onnx(model, X[:1].astype(numpy.float32),\n target_opset={'': opset, 'ai.onnx.ml': 2})\n except RuntimeError as e:\n print('target: %r error: %r' % (opset, e))\n continue\n nodes = len(onx.graph.node)\n print('target: %r --> %s %d' % (opset, get_domain_opset(onx), nodes))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It shows that the model cannot be converted for opset\nbelow 5. Operator `Reshape `_ changed in\nopset 5: a parameter became an input. 
The converter\ndoes not support *opset < 5* because runtimes usually do not.\n\n## Other opsets\n\nThe previous example changed the opset of the main domain\n``''`` but the other opset domain can be changed as well.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "for opset in range(9, onnx_opset_version() + 1):\n for opset_ml in range(1, 4):\n tops = {'': opset, 'ai.onnx.ml': opset_ml}\n try:\n print(\"try target_opset:\", tops)\n onx = to_onnx(\n model, X[:1].astype(numpy.float32), target_opset=tops)\n except RuntimeError as e:\n print('target: %r error: %r' % (opset, e))\n continue\n nodes = len(onx.graph.node)\n print('target: %r --> %s %d' % (opset, get_domain_opset(onx), nodes))" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# What is the opset number?\n\n.. index:: opset, target opset, version\n\nEvery library is versioned. :epkg:`scikit-learn` may change\nthe implementation of a specific model. That happens\nfor example with the [SVC](https://scikit-learn.org/stable/\nmodules/generated/sklearn.svm.SVC.html) model where\nthe parameter *break_ties* was added in 0.22. :epkg:`ONNX`\ndoes also have a version called *opset number*.\nOperator *ArgMin* was added in opset 1 and changed in opset\n11, 12, 13. Sometimes, it is updated to extend the list\nof types it supports, sometimes, it moves a parameter\ninto the input list. The runtime used to deploy the model\ndoes not implement a new version, in that case, a model\nmust be converted by usually using the most recent opset\nsupported by the runtime, we call that opset the\n*targeted opset*. 
An ONNX graph only contains\none unique opset, every node must be described following\nthe specifications defined by the latest opset below the\ntargeted opset.\n\nThis example considers an [IsolationForest](https://scikit-learn.org/stable/modules/generated/\nsklearn.ensemble.IsolationForest.html) and digs into opsets.\n\n## Data\n\nA simple example.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from onnx.defs import onnx_opset_version\nfrom skl2onnx import to_onnx\nimport numpy\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.datasets import make_blobs\n\nX, y = make_blobs(n_samples=100, n_features=2)\n\nmodel = IsolationForest(n_estimators=3)\nmodel.fit(X)\nlabels = model.predict(X)\n\nfig, ax = plt.subplots(1, 1)\nfor k in (-1, 1):\n ax.plot(X[labels == k, 0], X[labels == k, 1], 'o', label=\"cl%d\" % k)\nax.set_title(\"Sample\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ONNX\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(model, X[:1].astype(numpy.float32),\n target_opset={'': 15, 'ai.onnx.ml': 2})\nprint(onx)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The last line shows the opsets.\nLet's extract it.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "domains = onx.opset_import\nfor dom in domains:\n print(\"domain: %r, version: %r\" % (dom.domain, dom.version))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There are two opsets, one for standard operators,\nthe other for machine learning operators.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ONNX and opset\n\nThe converter can convert a model to an older opset\nthan the default one, from 1 to the last available one.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def get_domain_opset(onx):\n domains = onx.opset_import\n res = [{'domain': dom.domain, 'version': dom.version}\n for dom in domains]\n return {d['domain']: d['version'] for d in res}\n\n\nfor opset in range(6, onnx_opset_version() + 1):\n try:\n onx = to_onnx(model, X[:1].astype(numpy.float32),\n target_opset={'': opset, 'ai.onnx.ml': 2})\n except RuntimeError as e:\n print('target: %r error: %r' % (opset, e))\n continue\n nodes = len(onx.graph.node)\n print('target: %r --> %s %d' % (opset, get_domain_opset(onx), nodes))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It shows that the model cannot be converted for opset\nbelow 5. Operator [Reshape](https://github.com/onnx/\nonnx/blob/master/docs/Operators.md#Reshape) changed in\nopset 5: a parameter became an input. 
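A hedged illustration of that change, separate from the example above: with onnx.helper the same Reshape node can be written both ways, the target shape being a node attribute before opset 5 and a second input (typically an initializer) from opset 5 onwards.

from onnx import helper, TensorProto

# Opset 4 and below: the target shape is an attribute of the node.
reshape_old = helper.make_node('Reshape', ['X'], ['Y'], shape=[2, 3])

# Opset 5 and above: the target shape is a second input, here an INT64
# initializer; the name 'shape' is only illustrative.
shape_init = helper.make_tensor('shape', TensorProto.INT64, dims=[2], vals=[2, 3])
reshape_new = helper.make_node('Reshape', ['X', 'shape'], ['Y'])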
The converter\ndoes not support *opset < 5* because runtimes usually do not.\n\n## Other opsets\n\nThe previous example changed the opset of the main domain\n``''`` but the other opset domain can be changed as well.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "for opset in range(9, onnx_opset_version() + 1):\n for opset_ml in range(1, 4):\n tops = {'': opset, 'ai.onnx.ml': opset_ml}\n try:\n print(\"try target_opset:\", tops)\n onx = to_onnx(\n model, X[:1].astype(numpy.float32), target_opset=tops)\n except RuntimeError as e:\n print('target: %r error: %r' % (opset, e))\n continue\n nodes = len(onx.graph.node)\n print('target: %r --> %s %d' % (opset, get_domain_opset(onx), nodes))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/0638b5f714206e2515de6a60865a5a84/plot_cbegin_opset.py b/_downloads/0638b5f714206e2515de6a60865a5a84/plot_cbegin_opset.py index f17db915c..89f3a6458 100644 --- a/_downloads/0638b5f714206e2515de6a60865a5a84/plot_cbegin_opset.py +++ b/_downloads/0638b5f714206e2515de6a60865a5a84/plot_cbegin_opset.py @@ -28,9 +28,6 @@ `_ and digs into opsets. -.. contents:: - :local: - Data ++++ diff --git a/_downloads/06e3e29bf97a1cb5af418c42b2bb7c06/plot_gconverting.py b/_downloads/06e3e29bf97a1cb5af418c42b2bb7c06/plot_gconverting.py index 9a6442779..880444bc6 100644 --- a/_downloads/06e3e29bf97a1cb5af418c42b2bb7c06/plot_gconverting.py +++ b/_downloads/06e3e29bf97a1cb5af418c42b2bb7c06/plot_gconverting.py @@ -7,9 +7,6 @@ This example shows how to change the default ONNX graph such as renaming the inputs or outputs names. -.. contents:: - :local: - Basic example +++++++++++++ @@ -31,7 +28,8 @@ clr.fit(X_train, y_train) -onx = to_onnx(clr, X, options={'zipmap': False}) +onx = to_onnx(clr, X, options={'zipmap': False}, + target_opset=15) sess = InferenceSession(onx.SerializeToString()) input_names = [i.name for i in sess.get_inputs()] @@ -49,7 +47,8 @@ # types as well. 
onx = to_onnx(clr, X, options={'zipmap': False}, - initial_types=[('X56', FloatTensorType([None, X.shape[1]]))]) + initial_types=[('X56', FloatTensorType([None, X.shape[1]]))], + target_opset=15) sess = InferenceSession(onx.SerializeToString()) input_names = [i.name for i in sess.get_inputs()] @@ -67,7 +66,8 @@ onx = to_onnx(clr, X, options={'zipmap': False}, final_types=[('L', Int64TensorType([None])), - ('P', FloatTensorType([None, 3]))]) + ('P', FloatTensorType([None, 3]))], + target_opset=15) sess = InferenceSession(onx.SerializeToString()) input_names = [i.name for i in sess.get_inputs()] @@ -93,7 +93,7 @@ def rename_results(proposed_name, existing_names): onx = to_onnx(clr, X, options={'zipmap': False}, - naming=rename_results) + naming=rename_results, target_opset=15) sess = InferenceSession(onx.SerializeToString()) input_names = [i.name for i in sess.get_inputs()] diff --git a/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip b/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip index 8ca5e9e24..556f23bde 100644 Binary files a/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip and b/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip differ diff --git a/_downloads/082a989f1bd7a119850f46d625ce78d6/plot_investigate_pipeline.ipynb b/_downloads/082a989f1bd7a119850f46d625ce78d6/plot_investigate_pipeline.ipynb index 47af91fbe..0ec4ae15a 100644 --- a/_downloads/082a989f1bd7a119850f46d625ce78d6/plot_investigate_pipeline.ipynb +++ b/_downloads/082a989f1bd7a119850f46d625ce78d6/plot_investigate_pipeline.ipynb @@ -1,126 +1,126 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Investigate a pipeline\n\nThe following example shows how to look into a converted\nmodels and easily find errors at every step of the pipeline.\n\n## Create a pipeline\n\nWe reuse the pipeline implemented in example\n`Pipelining: chaining a PCA and a logistic regression\n`_.\nThere is one change because\n`ONNX-ML Imputer\n`_\ndoes not handle string type. This cannot be part of the final ONNX pipeline\nand must be removed. 
Look for comment starting with ``---`` below.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import skl2onnx\nimport onnx\nimport sklearn\nimport numpy\nimport pickle\nfrom skl2onnx.helpers import collect_intermediate_steps\nimport onnxruntime as rt\nfrom onnxconverter_common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\n\npipe = Pipeline(steps=[('pca', PCA()),\n ('logistic', LogisticRegression())])\n\ndigits = datasets.load_digits()\nX_digits = digits.data[:1000]\ny_digits = digits.target[:1000]\n\npipe.fit(X_digits, y_digits)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion to ONNX\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "initial_types = [('input', FloatTensorType((None, X_digits.shape[1])))]\nmodel_onnx = convert_sklearn(pipe, initial_types=initial_types,\n target_opset=12)\n\nsess = rt.InferenceSession(model_onnx.SerializeToString())\nprint(\"skl predict_proba\")\nprint(pipe.predict_proba(X_digits[:2]))\nonx_pred = sess.run(None, {'input': X_digits[:2].astype(np.float32)})[1]\ndf = pd.DataFrame(onx_pred)\nprint(\"onnx predict_proba\")\nprint(df.values)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Intermediate steps\n\nLet's imagine the final output is wrong and we need\nto look into each component of the pipeline which one\nis failing. The following method modifies the scikit-learn\npipeline to steal the intermediate outputs and produces\nan smaller ONNX graph for every operator.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "steps = collect_intermediate_steps(pipe, \"pipeline\",\n initial_types)\n\nassert len(steps) == 2\n\npipe.predict_proba(X_digits[:2])\n\nfor i, step in enumerate(steps):\n onnx_step = step['onnx_step']\n sess = rt.InferenceSession(onnx_step.SerializeToString())\n onnx_outputs = sess.run(None, {'input': X_digits[:2].astype(np.float32)})\n skl_outputs = step['model']._debug.outputs\n print(\"step 1\", type(step['model']))\n print(\"skl outputs\")\n print(skl_outputs)\n print(\"onnx outputs\")\n print(onnx_outputs)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Pickle\n\nEach steps is a separate model in the pipeline.\nIt can be pickle independetly from the others.\nAttribute *_debug* contains all the information\nneeded to *replay* the prediction of the model.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "to_save = {\n 'model': steps[1]['model'],\n 'data_input': steps[1]['model']._debug.inputs,\n 'data_output': steps[1]['model']._debug.outputs,\n 'inputs': steps[1]['inputs'],\n 'outputs': steps[1]['outputs'],\n}\ndel steps[1]['model']._debug\n\nwith open('classifier.pkl', 'wb') as f:\n pickle.dump(to_save, f)\n\nwith open('classifier.pkl', 'rb') as f:\n restored = pickle.load(f)\n\nprint(restored['model'].predict_proba(restored['data_input']['predict_proba']))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions 
used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Investigate a pipeline\n\nThe following example shows how to look into a converted\nmodels and easily find errors at every step of the pipeline.\n\n## Create a pipeline\n\nWe reuse the pipeline implemented in example\n[Pipelining: chaining a PCA and a logistic regression](https://scikit-learn.org/stable/auto_examples/\ncompose/plot_digits_pipe.html).\nThere is one change because\n[ONNX-ML Imputer](https://github.com/onnx/onnx/blob/master/docs/\nOperators-ml.md#ai.onnx.ml.Imputer)\ndoes not handle string type. This cannot be part of the final ONNX pipeline\nand must be removed. Look for comment starting with ``---`` below.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import skl2onnx\nimport onnx\nimport sklearn\nimport numpy\nimport pickle\nfrom skl2onnx.helpers import collect_intermediate_steps\nimport onnxruntime as rt\nfrom onnxconverter_common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\n\npipe = Pipeline(steps=[('pca', PCA()),\n ('logistic', LogisticRegression())])\n\ndigits = datasets.load_digits()\nX_digits = digits.data[:1000]\ny_digits = digits.target[:1000]\n\npipe.fit(X_digits, y_digits)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion to ONNX\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "initial_types = [('input', FloatTensorType((None, X_digits.shape[1])))]\nmodel_onnx = convert_sklearn(pipe, initial_types=initial_types,\n target_opset=12)\n\nsess = rt.InferenceSession(model_onnx.SerializeToString())\nprint(\"skl predict_proba\")\nprint(pipe.predict_proba(X_digits[:2]))\nonx_pred = sess.run(None, {'input': X_digits[:2].astype(np.float32)})[1]\ndf = pd.DataFrame(onx_pred)\nprint(\"onnx predict_proba\")\nprint(df.values)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Intermediate steps\n\nLet's imagine the final output is wrong and we need\nto look into each component of the pipeline which one\nis failing. 
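A hedged aside, not part of the original notebook: when the printed arrays are long, a tolerance check finds the diverging step faster than reading the values. The two arrays below are made-up placeholders for one step's scikit-learn and onnxruntime outputs.

import numpy as np
from numpy.testing import assert_allclose

skl_step_output = np.array([[0.1, 0.9], [0.8, 0.2]])
onnx_step_output = np.array([[0.1000001, 0.8999999], [0.8, 0.2]], dtype=np.float32)

# Raises an AssertionError with the maximum difference if the step diverges.
assert_allclose(skl_step_output, onnx_step_output, rtol=1e-5, atol=1e-6)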
The following method modifies the scikit-learn\npipeline to steal the intermediate outputs and produces\nan smaller ONNX graph for every operator.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "steps = collect_intermediate_steps(pipe, \"pipeline\",\n initial_types)\n\nassert len(steps) == 2\n\npipe.predict_proba(X_digits[:2])\n\nfor i, step in enumerate(steps):\n onnx_step = step['onnx_step']\n sess = rt.InferenceSession(onnx_step.SerializeToString())\n onnx_outputs = sess.run(None, {'input': X_digits[:2].astype(np.float32)})\n skl_outputs = step['model']._debug.outputs\n print(\"step 1\", type(step['model']))\n print(\"skl outputs\")\n print(skl_outputs)\n print(\"onnx outputs\")\n print(onnx_outputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Pickle\n\nEach steps is a separate model in the pipeline.\nIt can be pickle independetly from the others.\nAttribute *_debug* contains all the information\nneeded to *replay* the prediction of the model.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "to_save = {\n 'model': steps[1]['model'],\n 'data_input': steps[1]['model']._debug.inputs,\n 'data_output': steps[1]['model']._debug.outputs,\n 'inputs': steps[1]['inputs'],\n 'outputs': steps[1]['outputs'],\n}\ndel steps[1]['model']._debug\n\nwith open('classifier.pkl', 'wb') as f:\n pickle.dump(to_save, f)\n\nwith open('classifier.pkl', 'rb') as f:\n restored = pickle.load(f)\n\nprint(restored['model'].predict_proba(restored['data_input']['predict_proba']))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/09d45ba012e2596a98f3726d3d16d415/plot_nmf.py b/_downloads/09d45ba012e2596a98f3726d3d16d415/plot_nmf.py index 7e6f8dc67..a6aec7d9a 100644 --- a/_downloads/09d45ba012e2596a98f3726d3d16d415/plot_nmf.py +++ b/_downloads/09d45ba012e2596a98f3726d3d16d415/plot_nmf.py @@ -18,9 +18,6 @@ requires the estimation of a new matrix *W* with a gradient descent. -.. contents:: - :local: - Building a simple model +++++++++++++++++++++++ diff --git a/_downloads/0aeeef579b2d4f691ea5375f7dfe7233/plot_abegin_convert_pipeline.py b/_downloads/0aeeef579b2d4f691ea5375f7dfe7233/plot_abegin_convert_pipeline.py index 64c23160c..2cf8f72db 100644 --- a/_downloads/0aeeef579b2d4f691ea5375f7dfe7233/plot_abegin_convert_pipeline.py +++ b/_downloads/0aeeef579b2d4f691ea5375f7dfe7233/plot_abegin_convert_pipeline.py @@ -14,10 +14,6 @@ converts it into ONNX and finally computes the predictions a different runtime. -.. 
contents:: - :local: - - Training a pipeline +++++++++++++++++++ """ diff --git a/_downloads/0bad734602ead3989bfccd87f956b020/plot_benchmark_cdist.py b/_downloads/0bad734602ead3989bfccd87f956b020/plot_benchmark_cdist.py index 209ff5c85..b02074b36 100644 --- a/_downloads/0bad734602ead3989bfccd87f956b020/plot_benchmark_cdist.py +++ b/_downloads/0bad734602ead3989bfccd87f956b020/plot_benchmark_cdist.py @@ -11,9 +11,6 @@ CDist and compares its execution time between *onnxruntime* and *scipy*. -.. contents:: - :local: - ONNX Graph with CDist +++++++++++++++++++++ @@ -56,7 +53,8 @@ # We compute the output of CDist operator # with onnxruntime. -sess = InferenceSession(onx.SerializeToString()) +sess = InferenceSession(onx.SerializeToString(), + providers=["CPUExecutionProvider"]) res = sess.run(None, {'X': X, 'Y': Y}) print(res) diff --git a/_downloads/0cd1e1ac3eecbeec9e10834892786d49/plot_custom_parser.ipynb b/_downloads/0cd1e1ac3eecbeec9e10834892786d49/plot_custom_parser.ipynb index 9e7f684ca..df0cb3ea8 100644 --- a/_downloads/0cd1e1ac3eecbeec9e10834892786d49/plot_custom_parser.ipynb +++ b/_downloads/0cd1e1ac3eecbeec9e10834892786d49/plot_custom_parser.ipynb @@ -1,270 +1,270 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# When a custom model is neither a classifier nor a regressor\n\n*scikit-learn*'s API specifies that a regressor produces one\noutputs and a classifier produces two\noutputs, predicted labels and probabilities. The goal here is\nto add a third result which tells if the probability is\nabove a given threshold. That's implemented in method\n*validate*.\n\n## Iris and scoring\n\nA new class is created, it trains any classifier and implements\nthe method *validate* mentioned above.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import inspect\nimport numpy as np\nimport skl2onnx\nimport onnx\nimport sklearn\nfrom sklearn.base import ClassifierMixin, BaseEstimator, clone\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom skl2onnx import update_registered_converter\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nfrom onnxconverter_common.onnx_ops import (\n apply_identity, apply_cast, apply_greater\n)\nfrom skl2onnx import to_onnx, get_model_alias\nfrom skl2onnx.proto import onnx_proto\nfrom skl2onnx.common._registration import get_shape_calculator\nfrom skl2onnx.common.data_types import FloatTensorType, Int64TensorType\nimport matplotlib.pyplot as plt\n\n\nclass ValidatorClassifier(BaseEstimator, ClassifierMixin):\n\n def __init__(self, estimator=None, threshold=0.75):\n ClassifierMixin.__init__(self)\n BaseEstimator.__init__(self)\n if estimator is None:\n estimator = LogisticRegression(solver='liblinear')\n self.estimator = estimator\n self.threshold = threshold\n\n def fit(self, X, y, sample_weight=None):\n sig = inspect.signature(self.estimator.fit)\n if 'sample_weight' in sig.parameters:\n self.estimator_ = clone(self.estimator).fit(\n X, y, sample_weight=sample_weight)\n else:\n self.estimator_ = clone(self.estimator).fit(X, y)\n return self\n\n def predict(self, X):\n return self.estimator_.predict(X)\n\n def predict_proba(self, 
X):\n return self.estimator_.predict_proba(X)\n\n def validate(self, X):\n pred = self.predict_proba(X)\n mx = pred.max(axis=1)\n return (mx >= self.threshold) * 1\n\n\ndata = load_iris()\nX, y = data.data, data.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\nmodel = ValidatorClassifier()\nmodel.fit(X_train, y_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's now measure the indicator which tells\nif the probability of a prediction is above\na threshold.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(model.validate(X_test))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion to ONNX\n\nThe conversion fails for a new model because\nthe library does not know any converter associated\nto this new model.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n to_onnx(model, X_train[:1].astype(np.float32),\n target_opset=12)\nexcept RuntimeError as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom converter\n\nWe reuse some pieces of code from `l-custom-model`.\nThe shape calculator defines the shape of every output\nof the converted model.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def validator_classifier_shape_calculator(operator):\n\n input0 = operator.inputs[0] # inputs in ONNX graph\n outputs = operator.outputs # outputs in ONNX graph\n op = operator.raw_operator # scikit-learn model (mmust be fitted)\n if len(outputs) != 3:\n raise RuntimeError(\"3 outputs expected not {}.\".format(len(outputs)))\n\n N = input0.type.shape[0] # number of observations\n C = op.estimator_.classes_.shape[0] # dimension of outputs\n\n outputs[0].type = Int64TensorType([N]) # label\n outputs[1].type = FloatTensorType([N, C]) # probabilities\n outputs[2].type = Int64TensorType([C]) # validation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then the converter.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def validator_classifier_converter(scope, operator, container):\n outputs = operator.outputs # outputs in ONNX graph\n op = operator.raw_operator # scikit-learn model (mmust be fitted)\n\n # We reuse existing converter and declare it\n # as a local operator.\n model = op.estimator_\n alias = get_model_alias(type(model))\n val_op = scope.declare_local_operator(alias, model)\n val_op.inputs = operator.inputs\n\n # We add an intermediate outputs.\n val_label = scope.declare_local_variable('val_label', Int64TensorType())\n val_prob = scope.declare_local_variable('val_prob', FloatTensorType())\n val_op.outputs.append(val_label)\n val_op.outputs.append(val_prob)\n\n # We adjust the output of the submodel.\n shape_calc = get_shape_calculator(alias)\n shape_calc(val_op)\n\n # We now handle the validation.\n val_max = scope.get_unique_variable_name('val_max')\n container.add_node('ReduceMax', val_prob.full_name, val_max,\n name=scope.get_unique_operator_name('ReduceMax'),\n axes=[1], keepdims=0)\n\n th_name = scope.get_unique_variable_name('threshold')\n container.add_initializer(\n th_name, onnx_proto.TensorProto.FLOAT, [1], [op.threshold])\n val_bin = 
scope.get_unique_variable_name('val_bin')\n apply_greater(scope, [val_max, th_name], val_bin, container)\n\n val_val = scope.get_unique_variable_name('validate')\n apply_cast(scope, val_bin, val_val, container,\n to=onnx_proto.TensorProto.INT64)\n\n # We finally link the intermediate output to the shared converter.\n apply_identity(scope, val_label.full_name, outputs[0].full_name, container)\n apply_identity(scope, val_prob.full_name, outputs[1].full_name, container)\n apply_identity(scope, val_val, outputs[2].full_name, container)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then the registration.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(ValidatorClassifier, 'CustomValidatorClassifier',\n validator_classifier_shape_calculator,\n validator_classifier_converter)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And conversion...\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n to_onnx(model, X_test[:1].astype(np.float32),\n target_opset=12)\nexcept RuntimeError as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It fails because the library expected the model\nto behave like a classifier which produces two\noutputs. We need to add a custom parser to\ntell the library this model produces three outputs.\n\n## Custom parser\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def validator_classifier_parser(scope, model, inputs, custom_parsers=None):\n alias = get_model_alias(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n\n # inputs\n this_operator.inputs.append(inputs[0])\n\n # outputs\n val_label = scope.declare_local_variable('val_label', Int64TensorType())\n val_prob = scope.declare_local_variable('val_prob', FloatTensorType())\n val_val = scope.declare_local_variable('val_val', Int64TensorType())\n this_operator.outputs.append(val_label)\n this_operator.outputs.append(val_prob)\n this_operator.outputs.append(val_val)\n\n # end\n return this_operator.outputs" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Registration.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(ValidatorClassifier, 'CustomValidatorClassifier',\n validator_classifier_shape_calculator,\n validator_classifier_converter,\n parser=validator_classifier_parser)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And conversion again.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = to_onnx(model, X_test[:1].astype(np.float32),\n target_opset=12)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Final test\n\nWe need now to check the results are the same with ONNX.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X32 = X_test[:5].astype(np.float32)\n\nsess = rt.InferenceSession(model_onnx.SerializeToString())\nresults = sess.run(None, {'X': 
X32})\n\nprint(\"--labels--\")\nprint(\"sklearn\", model.predict(X32))\nprint(\"onnx\", results[0])\nprint(\"--probabilities--\")\nprint(\"sklearn\", model.predict_proba(X32))\nprint(\"onnx\", results[1])\nprint(\"--validation--\")\nprint(\"sklearn\", model.validate(X32))\nprint(\"onnx\", results[2])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It looks good.\n\n## Display the ONNX graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"validator_classifier.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng validator_classifier.dot')\n\nimage = plt.imread(\"validator_classifier.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", np.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# When a custom model is neither a classifier nor a regressor\n\n*scikit-learn*'s API specifies that a regressor produces one\noutputs and a classifier produces two\noutputs, predicted labels and probabilities. The goal here is\nto add a third result which tells if the probability is\nabove a given threshold. 
That's implemented in method\n*validate*.\n\n## Iris and scoring\n\nA new class is created, it trains any classifier and implements\nthe method *validate* mentioned above.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import inspect\nimport numpy as np\nimport skl2onnx\nimport onnx\nimport sklearn\nfrom sklearn.base import ClassifierMixin, BaseEstimator, clone\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom skl2onnx import update_registered_converter\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nfrom onnxconverter_common.onnx_ops import (\n apply_identity, apply_cast, apply_greater\n)\nfrom skl2onnx import to_onnx, get_model_alias\nfrom skl2onnx.proto import onnx_proto\nfrom skl2onnx.common._registration import get_shape_calculator\nfrom skl2onnx.common.data_types import FloatTensorType, Int64TensorType\nimport matplotlib.pyplot as plt\n\n\nclass ValidatorClassifier(BaseEstimator, ClassifierMixin):\n\n def __init__(self, estimator=None, threshold=0.75):\n ClassifierMixin.__init__(self)\n BaseEstimator.__init__(self)\n if estimator is None:\n estimator = LogisticRegression(solver='liblinear')\n self.estimator = estimator\n self.threshold = threshold\n\n def fit(self, X, y, sample_weight=None):\n sig = inspect.signature(self.estimator.fit)\n if 'sample_weight' in sig.parameters:\n self.estimator_ = clone(self.estimator).fit(\n X, y, sample_weight=sample_weight)\n else:\n self.estimator_ = clone(self.estimator).fit(X, y)\n return self\n\n def predict(self, X):\n return self.estimator_.predict(X)\n\n def predict_proba(self, X):\n return self.estimator_.predict_proba(X)\n\n def validate(self, X):\n pred = self.predict_proba(X)\n mx = pred.max(axis=1)\n return (mx >= self.threshold) * 1\n\n\ndata = load_iris()\nX, y = data.data, data.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\nmodel = ValidatorClassifier()\nmodel.fit(X_train, y_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's now measure the indicator which tells\nif the probability of a prediction is above\na threshold.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(model.validate(X_test))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion to ONNX\n\nThe conversion fails for a new model because\nthe library does not know any converter associated\nto this new model.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n to_onnx(model, X_train[:1].astype(np.float32),\n target_opset=12)\nexcept RuntimeError as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom converter\n\nWe reuse some pieces of code from `l-custom-model`.\nThe shape calculator defines the shape of every output\nof the converted model.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def validator_classifier_shape_calculator(operator):\n\n input0 = operator.inputs[0] # inputs in ONNX graph\n outputs = operator.outputs # outputs in ONNX graph\n op = operator.raw_operator # scikit-learn 
model (mmust be fitted)\n if len(outputs) != 3:\n raise RuntimeError(\"3 outputs expected not {}.\".format(len(outputs)))\n\n N = input0.type.shape[0] # number of observations\n C = op.estimator_.classes_.shape[0] # dimension of outputs\n\n outputs[0].type = Int64TensorType([N]) # label\n outputs[1].type = FloatTensorType([N, C]) # probabilities\n outputs[2].type = Int64TensorType([C]) # validation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then the converter.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def validator_classifier_converter(scope, operator, container):\n outputs = operator.outputs # outputs in ONNX graph\n op = operator.raw_operator # scikit-learn model (mmust be fitted)\n\n # We reuse existing converter and declare it\n # as a local operator.\n model = op.estimator_\n alias = get_model_alias(type(model))\n val_op = scope.declare_local_operator(alias, model)\n val_op.inputs = operator.inputs\n\n # We add an intermediate outputs.\n val_label = scope.declare_local_variable('val_label', Int64TensorType())\n val_prob = scope.declare_local_variable('val_prob', FloatTensorType())\n val_op.outputs.append(val_label)\n val_op.outputs.append(val_prob)\n\n # We adjust the output of the submodel.\n shape_calc = get_shape_calculator(alias)\n shape_calc(val_op)\n\n # We now handle the validation.\n val_max = scope.get_unique_variable_name('val_max')\n if container.target_opset >= 18:\n axis_name = scope.get_unique_variable_name('axis')\n container.add_initializer(\n axis_name, onnx_proto.TensorProto.INT64, [1], [1])\n container.add_node(\n 'ReduceMax', [val_prob.full_name, axis_name], val_max,\n name=scope.get_unique_operator_name('ReduceMax'),\n keepdims=0)\n else:\n container.add_node(\n 'ReduceMax', val_prob.full_name, val_max,\n name=scope.get_unique_operator_name('ReduceMax'),\n axes=[1], keepdims=0)\n\n th_name = scope.get_unique_variable_name('threshold')\n container.add_initializer(\n th_name, onnx_proto.TensorProto.FLOAT, [1], [op.threshold])\n val_bin = scope.get_unique_variable_name('val_bin')\n apply_greater(scope, [val_max, th_name], val_bin, container)\n\n val_val = scope.get_unique_variable_name('validate')\n apply_cast(scope, val_bin, val_val, container,\n to=onnx_proto.TensorProto.INT64)\n\n # We finally link the intermediate output to the shared converter.\n apply_identity(scope, val_label.full_name, outputs[0].full_name, container)\n apply_identity(scope, val_prob.full_name, outputs[1].full_name, container)\n apply_identity(scope, val_val, outputs[2].full_name, container)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then the registration.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(ValidatorClassifier, 'CustomValidatorClassifier',\n validator_classifier_shape_calculator,\n validator_classifier_converter)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And conversion...\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n to_onnx(model, X_test[:1].astype(np.float32),\n target_opset=12)\nexcept RuntimeError as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It fails because the library expected the model\nto behave like a 
classifier which produces two\noutputs. We need to add a custom parser to\ntell the library this model produces three outputs.\n\n## Custom parser\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def validator_classifier_parser(scope, model, inputs, custom_parsers=None):\n alias = get_model_alias(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n\n # inputs\n this_operator.inputs.append(inputs[0])\n\n # outputs\n val_label = scope.declare_local_variable('val_label', Int64TensorType())\n val_prob = scope.declare_local_variable('val_prob', FloatTensorType())\n val_val = scope.declare_local_variable('val_val', Int64TensorType())\n this_operator.outputs.append(val_label)\n this_operator.outputs.append(val_prob)\n this_operator.outputs.append(val_val)\n\n # end\n return this_operator.outputs" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Registration.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(ValidatorClassifier, 'CustomValidatorClassifier',\n validator_classifier_shape_calculator,\n validator_classifier_converter,\n parser=validator_classifier_parser)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And conversion again.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = to_onnx(model, X_test[:1].astype(np.float32),\n target_opset=12)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final test\n\nWe need now to check the results are the same with ONNX.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X32 = X_test[:5].astype(np.float32)\n\nsess = rt.InferenceSession(model_onnx.SerializeToString())\nresults = sess.run(None, {'X': X32})\n\nprint(\"--labels--\")\nprint(\"sklearn\", model.predict(X32))\nprint(\"onnx\", results[0])\nprint(\"--probabilities--\")\nprint(\"sklearn\", model.predict_proba(X32))\nprint(\"onnx\", results[1])\nprint(\"--validation--\")\nprint(\"sklearn\", model.validate(X32))\nprint(\"onnx\", results[2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It looks good.\n\n## Display the ONNX graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"validator_classifier.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng validator_classifier.dot')\n\nimage = plt.imread(\"validator_classifier.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", np.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", 
skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/17e4280bf900152c98913a15fff50922/plot_dbegin_options_zipmap.py b/_downloads/17e4280bf900152c98913a15fff50922/plot_dbegin_options_zipmap.py index a114ccfc7..9194233ee 100644 --- a/_downloads/17e4280bf900152c98913a15fff50922/plot_dbegin_options_zipmap.py +++ b/_downloads/17e4280bf900152c98913a15fff50922/plot_dbegin_options_zipmap.py @@ -1,191 +1,188 @@ -# SPDX-License-Identifier: Apache-2.0 - - -""" -.. _l-tutorial-example-zipmap: - -Choose appropriate output of a classifier -========================================= - -A scikit-learn classifier usually returns a matrix of probabilities. -By default, *sklearn-onnx* converts that matrix -into a list of dictionaries where each probabily is mapped -to its class id or name. That mechanism retains the class names -but is slower. Let's see what other options are available. - -.. contents:: - :local: - -Train a model and convert it -++++++++++++++++++++++++++++ - -""" -from timeit import repeat -import numpy -import sklearn -from sklearn.datasets import load_iris -from sklearn.model_selection import train_test_split -import onnxruntime as rt -import onnx -import skl2onnx -from skl2onnx.common.data_types import FloatTensorType -from skl2onnx import to_onnx -from sklearn.linear_model import LogisticRegression -from sklearn.multioutput import MultiOutputClassifier - -iris = load_iris() -X, y = iris.data, iris.target -X = X.astype(numpy.float32) -y = y * 2 + 10 # to get labels different from [0, 1, 2] -X_train, X_test, y_train, y_test = train_test_split(X, y) -clr = LogisticRegression(max_iter=500) -clr.fit(X_train, y_train) -print(clr) - -onx = to_onnx(clr, X_train, target_opset=12) - -############################ -# Default behaviour: zipmap=True -# ++++++++++++++++++++++++++++++ -# -# The output type for the probabilities is a list of -# dictionaries. - -sess = rt.InferenceSession(onx.SerializeToString()) -res = sess.run(None, {'X': X_test}) -print(res[1][:2]) -print("probabilities type:", type(res[1])) -print("type for the first observations:", type(res[1][0])) - -################################### -# Option zipmap=False -# +++++++++++++++++++ -# -# Probabilities are now a matrix. - -initial_type = [('float_input', FloatTensorType([None, 4]))] -options = {id(clr): {'zipmap': False}} -onx2 = to_onnx(clr, X_train, options=options, target_opset=12) - -sess2 = rt.InferenceSession(onx2.SerializeToString()) -res2 = sess2.run(None, {'X': X_test}) -print(res2[1][:2]) -print("probabilities type:", type(res2[1])) -print("type for the first observations:", type(res2[1][0])) - -################################### -# Option zipmap='columns' -# +++++++++++++++++++++++ -# -# This options removes the final operator ZipMap and splits -# the probabilities into columns. The final model produces -# one output for the label, and one output per class. 
- -options = {id(clr): {'zipmap': 'columns'}} -onx3 = to_onnx(clr, X_train, options=options, target_opset=12) - -sess3 = rt.InferenceSession(onx3.SerializeToString()) -res3 = sess3.run(None, {'X': X_test}) -for i, out in enumerate(sess3.get_outputs()): - print("output: '{}' shape={} values={}...".format( - out.name, res3[i].shape, res3[i][:2])) - - -################################### -# Let's compare prediction time -# +++++++++++++++++++++++++++++ - -print("Average time with ZipMap:") -print(sum(repeat(lambda: sess.run(None, {'X': X_test}), - number=100, repeat=10)) / 10) - -print("Average time without ZipMap:") -print(sum(repeat(lambda: sess2.run(None, {'X': X_test}), - number=100, repeat=10)) / 10) - -print("Average time without ZipMap but with columns:") -print(sum(repeat(lambda: sess3.run(None, {'X': X_test}), - number=100, repeat=10)) / 10) - -# The prediction is much faster without ZipMap -# on this example. -# The optimisation is even faster when the classes -# are described with strings and not integers -# as the final result (list of dictionaries) may copy -# many times the same information with onnxruntime. - -####################################### -# Option zimpap=False and output_class_labels=True -# ++++++++++++++++++++++++++++++++++++++++++++++++ -# -# Option `zipmap=False` seems a better choice because it is -# much faster but labels are lost in the process. Option -# `output_class_labels` can be used to expose the labels -# as a third output. - -initial_type = [('float_input', FloatTensorType([None, 4]))] -options = {id(clr): {'zipmap': False, 'output_class_labels': True}} -onx4 = to_onnx(clr, X_train, options=options, target_opset=12) - -sess4 = rt.InferenceSession(onx4.SerializeToString()) -res4 = sess4.run(None, {'X': X_test}) -print(res4[1][:2]) -print("probabilities type:", type(res4[1])) -print("class labels:", res4[2]) - -########################################### -# Processing time. - -print("Average time without ZipMap but with output_class_labels:") -print(sum(repeat(lambda: sess4.run(None, {'X': X_test}), - number=100, repeat=10)) / 10) - -########################################### -# MultiOutputClassifier -# +++++++++++++++++++++ -# -# This model is equivalent to several classifiers, one for every label -# to predict. Instead of returning a matrix of probabilities, it returns -# a sequence of matrices. Let's first modify the labels to get -# a problem for a MultiOutputClassifier. - -y = numpy.vstack([y, y + 100]).T -y[::5, 1] = 1000 # Let's a fourth class. -print(y[:5]) - -######################################## -# Let's train a MultiOutputClassifier. - -X_train, X_test, y_train, y_test = train_test_split(X, y) -clr = MultiOutputClassifier(LogisticRegression(max_iter=500)) -clr.fit(X_train, y_train) -print(clr) - -onx5 = to_onnx(clr, X_train, target_opset=12) - -sess5 = rt.InferenceSession(onx5.SerializeToString()) -res5 = sess5.run(None, {'X': X_test[:3]}) -print(res5) - -######################################## -# Option zipmap is ignored. Labels are missing but they can be -# added back as a third output. 
- -onx6 = to_onnx(clr, X_train, target_opset=12, - options={'zipmap': False, 'output_class_labels': True}) - -sess6 = rt.InferenceSession(onx6.SerializeToString()) -res6 = sess6.run(None, {'X': X_test[:3]}) -print("predicted labels", res6[0]) -print("predicted probabilies", res6[1]) -print("class labels", res6[2]) - - -################################# -# **Versions used for this example** - -print("numpy:", numpy.__version__) -print("scikit-learn:", sklearn.__version__) -print("onnx: ", onnx.__version__) -print("onnxruntime: ", rt.__version__) -print("skl2onnx: ", skl2onnx.__version__) +# SPDX-License-Identifier: Apache-2.0 + + +""" +.. _l-tutorial-example-zipmap: + +Choose appropriate output of a classifier +========================================= + +A scikit-learn classifier usually returns a matrix of probabilities. +By default, *sklearn-onnx* converts that matrix +into a list of dictionaries where each probabily is mapped +to its class id or name. That mechanism retains the class names +but is slower. Let's see what other options are available. + +Train a model and convert it +++++++++++++++++++++++++++++ + +""" +from timeit import repeat +import numpy +import sklearn +from sklearn.datasets import load_iris +from sklearn.model_selection import train_test_split +import onnxruntime as rt +import onnx +import skl2onnx +from skl2onnx.common.data_types import FloatTensorType +from skl2onnx import to_onnx +from sklearn.linear_model import LogisticRegression +from sklearn.multioutput import MultiOutputClassifier + +iris = load_iris() +X, y = iris.data, iris.target +X = X.astype(numpy.float32) +y = y * 2 + 10 # to get labels different from [0, 1, 2] +X_train, X_test, y_train, y_test = train_test_split(X, y) +clr = LogisticRegression(max_iter=500) +clr.fit(X_train, y_train) +print(clr) + +onx = to_onnx(clr, X_train, target_opset=12) + +############################ +# Default behaviour: zipmap=True +# ++++++++++++++++++++++++++++++ +# +# The output type for the probabilities is a list of +# dictionaries. + +sess = rt.InferenceSession(onx.SerializeToString()) +res = sess.run(None, {'X': X_test}) +print(res[1][:2]) +print("probabilities type:", type(res[1])) +print("type for the first observations:", type(res[1][0])) + +################################### +# Option zipmap=False +# +++++++++++++++++++ +# +# Probabilities are now a matrix. + +initial_type = [('float_input', FloatTensorType([None, 4]))] +options = {id(clr): {'zipmap': False}} +onx2 = to_onnx(clr, X_train, options=options, target_opset=12) + +sess2 = rt.InferenceSession(onx2.SerializeToString()) +res2 = sess2.run(None, {'X': X_test}) +print(res2[1][:2]) +print("probabilities type:", type(res2[1])) +print("type for the first observations:", type(res2[1][0])) + +################################### +# Option zipmap='columns' +# +++++++++++++++++++++++ +# +# This options removes the final operator ZipMap and splits +# the probabilities into columns. The final model produces +# one output for the label, and one output per class. 
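A hedged aside on the ``zipmap=False`` output above, reusing ``clr`` and ``res2`` from this script: removing ZipMap drops the class names from the probability output, but they can be re-attached on the Python side since the columns are expected to follow the order of ``clr.classes_``.

import numpy

proba = res2[1]                                   # (n_samples, n_classes)
labels = numpy.array(clr.classes_)[proba.argmax(axis=1)]
print(labels[:5])                                 # should match res2[0][:5]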
+
+options = {id(clr): {'zipmap': 'columns'}}
+onx3 = to_onnx(clr, X_train, options=options, target_opset=12)
+
+sess3 = rt.InferenceSession(onx3.SerializeToString())
+res3 = sess3.run(None, {'X': X_test})
+for i, out in enumerate(sess3.get_outputs()):
+    print("output: '{}' shape={} values={}...".format(
+        out.name, res3[i].shape, res3[i][:2]))
+
+
+###################################
+# Let's compare prediction time
+# +++++++++++++++++++++++++++++
+
+print("Average time with ZipMap:")
+print(sum(repeat(lambda: sess.run(None, {'X': X_test}),
+                 number=100, repeat=10)) / 10)
+
+print("Average time without ZipMap:")
+print(sum(repeat(lambda: sess2.run(None, {'X': X_test}),
+                 number=100, repeat=10)) / 10)
+
+print("Average time without ZipMap but with columns:")
+print(sum(repeat(lambda: sess3.run(None, {'X': X_test}),
+                 number=100, repeat=10)) / 10)
+
+# The prediction is much faster without ZipMap on this example.
+# The gain is even more significant when the classes are described
+# with strings rather than integers, as the final result
+# (a list of dictionaries) may copy the same information
+# many times with onnxruntime.
+
+#######################################
+# Option zipmap=False and output_class_labels=True
+# ++++++++++++++++++++++++++++++++++++++++++++++++
+#
+# Option `zipmap=False` seems a better choice because it is
+# much faster, but the class labels are lost in the process. Option
+# `output_class_labels` can be used to expose the labels
+# as a third output.
+
+initial_type = [('float_input', FloatTensorType([None, 4]))]
+options = {id(clr): {'zipmap': False, 'output_class_labels': True}}
+onx4 = to_onnx(clr, X_train, options=options, target_opset=12)
+
+sess4 = rt.InferenceSession(onx4.SerializeToString())
+res4 = sess4.run(None, {'X': X_test})
+print(res4[1][:2])
+print("probabilities type:", type(res4[1]))
+print("class labels:", res4[2])
+
+###########################################
+# Processing time.
+
+print("Average time without ZipMap but with output_class_labels:")
+print(sum(repeat(lambda: sess4.run(None, {'X': X_test}),
+                 number=100, repeat=10)) / 10)
+
+###########################################
+# MultiOutputClassifier
+# +++++++++++++++++++++
+#
+# This model is equivalent to several classifiers, one for every label
+# to predict. Instead of returning a matrix of probabilities, it returns
+# a sequence of matrices. Let's first modify the labels to get
+# a problem for a MultiOutputClassifier.
+
+y = numpy.vstack([y, y + 100]).T
+y[::5, 1] = 1000  # Let's add a fourth class.
+print(y[:5])
+
+########################################
+# Let's train a MultiOutputClassifier.
+
+X_train, X_test, y_train, y_test = train_test_split(X, y)
+clr = MultiOutputClassifier(LogisticRegression(max_iter=500))
+clr.fit(X_train, y_train)
+print(clr)
+
+onx5 = to_onnx(clr, X_train, target_opset=12)
+
+sess5 = rt.InferenceSession(onx5.SerializeToString())
+res5 = sess5.run(None, {'X': X_test[:3]})
+print(res5)
+
+########################################
+# Option zipmap is ignored. Labels are missing but they can be
+# added back as a third output.
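+# A quick sanity check first (a minimal sketch, assuming onnxruntime maps
+# the ONNX sequence output to a Python list of arrays): the second output
+# should contain one probability matrix per target column.
+
+probas5 = [numpy.asarray(p) for p in res5[1]]
+print("number of probability matrices:", len(probas5))
+print("shapes:", [p.shape for p in probas5])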
+ +onx6 = to_onnx(clr, X_train, target_opset=12, + options={'zipmap': False, 'output_class_labels': True}) + +sess6 = rt.InferenceSession(onx6.SerializeToString()) +res6 = sess6.run(None, {'X': X_test[:3]}) +print("predicted labels", res6[0]) +print("predicted probabilies", res6[1]) +print("class labels", res6[2]) + + +################################# +# **Versions used for this example** + +print("numpy:", numpy.__version__) +print("scikit-learn:", sklearn.__version__) +print("onnx: ", onnx.__version__) +print("onnxruntime: ", rt.__version__) +print("skl2onnx: ", skl2onnx.__version__) diff --git a/_downloads/180b5e344e3251ffaf63d473c1d3a95f/plot_logging.ipynb b/_downloads/180b5e344e3251ffaf63d473c1d3a95f/plot_logging.ipynb index 9b62b413d..a4c931ccc 100644 --- a/_downloads/180b5e344e3251ffaf63d473c1d3a95f/plot_logging.ipynb +++ b/_downloads/180b5e344e3251ffaf63d473c1d3a95f/plot_logging.ipynb @@ -1,144 +1,144 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Logging, verbose\n\nThe conversion of a pipeline fails if it contains an object without any\nassociated converter. It may also fails if one of the object is mapped\nby a custom converter. If the error message is not explicit enough,\nit is possible to enable logging.\n\n## Train a model\n\nA very basic example using random forest and\nthe iris dataset.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import logging\nimport numpy\nimport onnx\nimport onnxruntime as rt\nimport sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn\nimport skl2onnx\n\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nclr = DecisionTreeClassifier()\nclr.fit(X_train, y_train)\nprint(clr)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Convert a model into ONNX\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "initial_type = [('float_input', FloatTensorType([None, 4]))]\nonx = convert_sklearn(clr, initial_types=initial_type,\n target_opset=12)\n\n\nsess = rt.InferenceSession(onx.SerializeToString())\ninput_name = sess.get_inputs()[0].name\nlabel_name = sess.get_outputs()[0].name\npred_onx = sess.run([label_name],\n {input_name: X_test.astype(numpy.float32)})[0]\nprint(pred_onx)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion with parameter verbose\n\nverbose is a parameter which prints messages on the standard output.\nIt tells which converter is called. `verbose=1` usually means what *skl2onnx*\nis doing to convert a pipeline. `verbose=2+`\nis reserved for information within converters.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "convert_sklearn(clr, initial_types=initial_type, target_opset=12, verbose=1)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion with logging\n\nThis is very detailed logging. 
It which operators or variables\n(output of converters) is processed, which node is created...\nThis information may be useful when a custom converter is being\nimplemented.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "logger = logging.getLogger('skl2onnx')\nlogger.setLevel(logging.DEBUG)\nlogging.basicConfig(level=logging.DEBUG)\n\nconvert_sklearn(clr, initial_types=initial_type, target_opset=12)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And to disable it.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "logger.setLevel(logging.INFO)\nlogging.basicConfig(level=logging.INFO)\n\nconvert_sklearn(clr, initial_types=initial_type, target_opset=12)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Logging, verbose\n\nThe conversion of a pipeline fails if it contains an object without any\nassociated converter. It may also fails if one of the object is mapped\nby a custom converter. 
If the error message is not explicit enough,\nit is possible to enable logging.\n\n## Train a model\n\nA very basic example using random forest and\nthe iris dataset.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import logging\nimport numpy\nimport onnx\nimport onnxruntime as rt\nimport sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn\nimport skl2onnx\n\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nclr = DecisionTreeClassifier()\nclr.fit(X_train, y_train)\nprint(clr)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert a model into ONNX\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "initial_type = [('float_input', FloatTensorType([None, 4]))]\nonx = convert_sklearn(clr, initial_types=initial_type,\n target_opset=12)\n\n\nsess = rt.InferenceSession(onx.SerializeToString())\ninput_name = sess.get_inputs()[0].name\nlabel_name = sess.get_outputs()[0].name\npred_onx = sess.run([label_name],\n {input_name: X_test.astype(numpy.float32)})[0]\nprint(pred_onx)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion with parameter verbose\n\nverbose is a parameter which prints messages on the standard output.\nIt tells which converter is called. `verbose=1` usually means what *skl2onnx*\nis doing to convert a pipeline. `verbose=2+`\nis reserved for information within converters.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "convert_sklearn(clr, initial_types=initial_type, target_opset=12, verbose=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion with logging\n\nThis is very detailed logging. 
It which operators or variables\n(output of converters) is processed, which node is created...\nThis information may be useful when a custom converter is being\nimplemented.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "logger = logging.getLogger('skl2onnx')\nlogger.setLevel(logging.DEBUG)\nlogging.basicConfig(level=logging.DEBUG)\n\nconvert_sklearn(clr, initial_types=initial_type, target_opset=12)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And to disable it.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "logger.setLevel(logging.INFO)\nlogging.basicConfig(level=logging.INFO)\n\nconvert_sklearn(clr, initial_types=initial_type, target_opset=12)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/186ee738e3c4a97e498a4b21c416aa4e/plot_dbegin_options_list.py b/_downloads/186ee738e3c4a97e498a4b21c416aa4e/plot_dbegin_options_list.py index 5f617a53c..d7c0ab1b1 100644 --- a/_downloads/186ee738e3c4a97e498a4b21c416aa4e/plot_dbegin_options_list.py +++ b/_downloads/186ee738e3c4a97e498a4b21c416aa4e/plot_dbegin_options_list.py @@ -13,9 +13,6 @@ if they use a black listed operator, a couple of them produces a different ONNX graph. -.. contents:: - :local: - GaussianMixture +++++++++++++++ diff --git a/_downloads/1a9827d3afc0434f81b2d2689b993dea/plot_convert_syntax.py b/_downloads/1a9827d3afc0434f81b2d2689b993dea/plot_convert_syntax.py index 5d45bb374..2eced5ec4 100644 --- a/_downloads/1a9827d3afc0434f81b2d2689b993dea/plot_convert_syntax.py +++ b/_downloads/1a9827d3afc0434f81b2d2689b993dea/plot_convert_syntax.py @@ -10,9 +10,6 @@ This example leverages some code added to implement custom converters in an easy way. -.. contents:: - :local: - Predict with onnxruntime ++++++++++++++++++++++++ diff --git a/_downloads/1b4248c6154708f443d9312201e3b54c/plot_convert_model.py b/_downloads/1b4248c6154708f443d9312201e3b54c/plot_convert_model.py index 379791f5c..2ccbf5eb8 100644 --- a/_downloads/1b4248c6154708f443d9312201e3b54c/plot_convert_model.py +++ b/_downloads/1b4248c6154708f443d9312201e3b54c/plot_convert_model.py @@ -14,9 +14,6 @@ * convert it into *ONNX* with *sklearn-onnx*, * predict with *onnxruntime*. -.. 
contents:: - :local: - Train a model +++++++++++++ @@ -56,7 +53,7 @@ ################################### # Compute the prediction with ONNX Runtime # ++++++++++++++++++++++++++++++++++++++++ -sess = rt.InferenceSession("rf_iris.onnx") +sess = rt.InferenceSession("rf_iris.onnx", providers=["CPUExecutionProvider"]) input_name = sess.get_inputs()[0].name label_name = sess.get_outputs()[0].name pred_onx = sess.run( diff --git a/_downloads/1d4ff4360610a9582326e12c4c195df7/plot_gexternal_catboost.py b/_downloads/1d4ff4360610a9582326e12c4c195df7/plot_gexternal_catboost.py new file mode 100644 index 000000000..ab5796b4c --- /dev/null +++ b/_downloads/1d4ff4360610a9582326e12c4c195df7/plot_gexternal_catboost.py @@ -0,0 +1,158 @@ +# SPDX-License-Identifier: Apache-2.0 + +""" +.. _example-catboost: + +Convert a pipeline with a CatBoost classifier +============================================= + +.. index:: CatBoost + +:epkg:`sklearn-onnx` only converts :epkg:`scikit-learn` models into *ONNX* +but many libraries implement :epkg:`scikit-learn` API so that their models +can be included in a :epkg:`scikit-learn` pipeline. This example considers +a pipeline including a :epkg:`CatBoost` model. :epkg:`sklearn-onnx` can convert +the whole pipeline as long as it knows the converter associated to +a *CatBoostClassifier*. Let's see how to do it. + +Train a CatBoostClassifier +++++++++++++++++++++++++++ +""" +from pyquickhelper.helpgen.graphviz_helper import plot_graphviz +import numpy +from onnx.helper import get_attribute_value +from sklearn.datasets import load_iris +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler +from mlprodict.onnxrt import OnnxInference +import onnxruntime as rt +from skl2onnx import convert_sklearn, update_registered_converter +from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes # noqa +from skl2onnx.common.data_types import FloatTensorType, Int64TensorType, guess_tensor_type +from skl2onnx._parse import _apply_zipmap, _get_sklearn_operator_name +from catboost import CatBoostClassifier +from catboost.utils import convert_to_onnx_object + +data = load_iris() +X = data.data[:, :2] +y = data.target + +ind = numpy.arange(X.shape[0]) +numpy.random.shuffle(ind) +X = X[ind, :].copy() +y = y[ind].copy() + +pipe = Pipeline([('scaler', StandardScaler()), + ('lgbm', CatBoostClassifier(n_estimators=3))]) +pipe.fit(X, y) + +###################################### +# Register the converter for CatBoostClassifier +# +++++++++++++++++++++++++++++++++++++++++++++ +# +# The model has no converter implemented in sklearn-onnx. +# We need to register the one coming from *CatBoost* itself. +# However, the converter does not follow sklearn-onnx design and +# needs to be wrapped. 
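+# To see what needs wrapping, a minimal sketch below peeks at the graph
+# produced by CatBoost's own exporter (assuming the fitted classifier is
+# reachable through the pipeline step named 'lgbm' above): it is expected
+# to contain a TreeEnsemble node, possibly followed by a ZipMap, which the
+# wrapper defined next injects into the main ONNX graph.
+
+native_onx = convert_to_onnx_object(pipe.named_steps['lgbm'])
+print("node types:", [n.op_type for n in native_onx.graph.node])
+print("opsets:", {d.domain: d.version for d in native_onx.opset_import})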
+ + +def skl2onnx_parser_castboost_classifier(scope, model, inputs, + custom_parsers=None): + options = scope.get_options(model, dict(zipmap=True)) + no_zipmap = isinstance(options['zipmap'], bool) and not options['zipmap'] + + alias = _get_sklearn_operator_name(type(model)) + this_operator = scope.declare_local_operator(alias, model) + this_operator.inputs = inputs + + label_variable = scope.declare_local_variable('label', Int64TensorType()) + prob_dtype = guess_tensor_type(inputs[0].type) + probability_tensor_variable = scope.declare_local_variable('probabilities', prob_dtype) + this_operator.outputs.append(label_variable) + this_operator.outputs.append(probability_tensor_variable) + probability_tensor = this_operator.outputs + + if no_zipmap: + return probability_tensor + + return _apply_zipmap(options['zipmap'], scope, model, + inputs[0].type, probability_tensor) + + +def skl2onnx_convert_catboost(scope, operator, container): + """ + CatBoost returns an ONNX graph with a single node. + This function adds it to the main graph. + """ + onx = convert_to_onnx_object(operator.raw_operator) + opsets = {d.domain: d.version for d in onx.opset_import} + if '' in opsets and opsets[''] >= container.target_opset: + raise RuntimeError( + "CatBoost uses an opset more recent than the target one.") + if len(onx.graph.initializer) > 0 or len(onx.graph.sparse_initializer) > 0: + raise NotImplementedError( + "CatBoost returns a model initializers. This option is not implemented yet.") + if (len(onx.graph.node) not in (1, 2) or not onx.graph.node[0].op_type.startswith("TreeEnsemble") or + (len(onx.graph.node) == 2 and onx.graph.node[1].op_type != "ZipMap")): + types = ", ".join(map(lambda n: n.op_type, onx.graph.node)) + raise NotImplementedError( + f"CatBoost returns {len(onx.graph.node)} != 1 (types={types}). " + f"This option is not implemented yet.") + node = onx.graph.node[0] + atts = {} + for att in node.attribute: + atts[att.name] = get_attribute_value(att) + container.add_node( + node.op_type, [operator.inputs[0].full_name], + [operator.outputs[0].full_name, operator.outputs[1].full_name], + op_domain=node.domain, op_version=opsets.get(node.domain, None), + **atts) + + +update_registered_converter( + CatBoostClassifier, + 'CatBoostCatBoostClassifier', + calculate_linear_classifier_output_shapes, + skl2onnx_convert_catboost, + parser=skl2onnx_parser_castboost_classifier, + options={'nocl': [True, False], 'zipmap': [True, False, 'columns']}) + +################################## +# Convert +# +++++++ + +model_onnx = convert_sklearn( + pipe, 'pipeline_catboost', + [('input', FloatTensorType([None, 2]))], + target_opset={'': 12, 'ai.onnx.ml': 2}) + +# And save. +with open("pipeline_catboost.onnx", "wb") as f: + f.write(model_onnx.SerializeToString()) + +########################### +# Compare the predictions +# +++++++++++++++++++++++ +# +# Predictions with CatBoost. + +print("predict", pipe.predict(X[:5])) +print("predict_proba", pipe.predict_proba(X[:1])) + +########################## +# Predictions with onnxruntime. 
+ +sess = rt.InferenceSession("pipeline_catboost.onnx") + +pred_onx = sess.run(None, {"input": X[:5].astype(numpy.float32)}) +print("predict", pred_onx[0]) +print("predict_proba", pred_onx[1][:1]) + +############################# +# Final graph +# +++++++++++ + +oinf = OnnxInference(model_onnx) +ax = plot_graphviz(oinf.to_dot()) +ax.get_xaxis().set_visible(False) +ax.get_yaxis().set_visible(False) diff --git a/_downloads/1e0097aeb4f54b635ceba842f3e6889a/plot_pextend_python_runtime.ipynb b/_downloads/1e0097aeb4f54b635ceba842f3e6889a/plot_pextend_python_runtime.ipynb index 80d819e5c..34f091e02 100644 --- a/_downloads/1e0097aeb4f54b635ceba842f3e6889a/plot_pextend_python_runtime.ipynb +++ b/_downloads/1e0097aeb4f54b635ceba842f3e6889a/plot_pextend_python_runtime.ipynb @@ -1,187 +1,187 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Fast design with a python runtime\n\n.. index:: custom python runtime\n\n:epkg:`ONNX operators` do not contain all operators\nfrom :epkg:`numpy`. There is no operator for\n`solve `_ but this one\nis needed to implement the prediction function\nof model :epkg:`NMF`. The converter can be written\nincluding a new ONNX operator but then it requires a\nruntime for it to be tested. This example shows how\nto do that with the python runtime implemented in\n:epkg:`mlprodict`. It may not be :epkg:`onnxruntime`\nbut that speeds up the implementation of the converter.\n\nThe example changes the transformer from\n`l-plot-custom-converter`, the method *predict*\ndecorrelates the variables by computing the eigen\nvalues. Method *fit* does not do anything anymore.\n\n## A transformer which decorrelates variables\n\nThis time, the eigen values are not estimated at\ntraining time but at prediction time.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from mlprodict.onnxrt.shape_object import ShapeObject\nfrom mlprodict.onnxrt.ops_cpu import OpRunCustom, register_operator\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxAdd,\n OnnxCast,\n OnnxDiv,\n OnnxGatherElements,\n OnnxEyeLike,\n OnnxMatMul,\n OnnxMul,\n OnnxPow,\n OnnxReduceMean,\n OnnxShape,\n OnnxSub,\n OnnxTranspose,\n)\nfrom skl2onnx.algebra import OnnxOperator\nfrom mlprodict.onnxrt import OnnxInference\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nimport pickle\nfrom io import BytesIO\nimport numpy\nfrom numpy.testing import assert_almost_equal\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\nfrom skl2onnx.common.data_types import guess_numpy_type, guess_proto_type\nfrom skl2onnx import to_onnx\nfrom skl2onnx import update_registered_converter\n\n\nclass LiveDecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.nf_`: number of expected features\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n if sample_weights is not None:\n raise NotImplementedError(\n \"sample_weights != None is not implemented.\")\n self.nf_ = X.shape[1]\n return self\n\n def transform(self, X):\n 
mean_ = numpy.mean(X, axis=0, keepdims=True)\n X2 = X - mean_\n V = X2.T @ X2 / X2.shape[0]\n if self.alpha != 0:\n V += numpy.identity(V.shape[0]) * self.alpha\n L, P = numpy.linalg.eig(V)\n Linv = L ** (-0.5)\n diag = numpy.diag(Linv)\n root = P @ diag @ P.transpose()\n coef_ = root\n return (X - mean_) @ coef_\n\n\ndef test_live_decorrelate_transformer():\n data = load_iris()\n X = data.data\n\n dec = LiveDecorrelateTransformer()\n dec.fit(X)\n pred = dec.transform(X)\n cov = pred.T @ pred\n cov /= cov[0, 0]\n assert_almost_equal(numpy.identity(4), cov)\n\n dec = LiveDecorrelateTransformer(alpha=1e-10)\n dec.fit(X)\n pred = dec.transform(X)\n cov = pred.T @ pred\n cov /= cov[0, 0]\n assert_almost_equal(numpy.identity(4), cov)\n\n st = BytesIO()\n pickle.dump(dec, st)\n dec2 = pickle.load(BytesIO(st.getvalue()))\n assert_almost_equal(dec.transform(X), dec2.transform(X))\n\n\ntest_live_decorrelate_transformer()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Everything works as expected.\n\n## Extend ONNX\n\nThe conversion requires one operator to compute\nthe eigen values and vectors. The list of\n:epkg:`ONNX operators` does not contain anything\nwhich produces eigen values. It does not seem\nefficient to implement an algorithm with existing\nONNX operators to find eigen values.\nA new operator must be\nadded, we give it the same name *Eig* as in :epkg:`numpy`.\nIt would take a matrix and would produce one or two outputs,\nthe eigen values and the eigen vectors.\nJust for the exercise, a parameter specifies\nto output the eigen vectors as a second output.\n\n### New ONNX operator\n\nAny unknown operator can be\nadded to an ONNX graph. Operators are grouped by domain,\n`''` or `ai.onnx` refers to matrix computation.\n`ai.onnx.ml` refers to usual machine learning models.\nNew domains are officially supported by :epkg:`onnx` package.\nWe want to create a new operator `Eig` of domain `onnxcustom`.\nIt must be declared in a class, then a converter can use it.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "class OnnxEig(OnnxOperator):\n \"\"\"\n Defines a custom operator not defined by ONNX\n specifications but in onnxruntime.\n \"\"\"\n\n since_version = 1 # last changed in this version\n expected_inputs = [('X', 'T')] # input names and types\n expected_outputs = [('EigenValues', 'T'), # output names and types\n ('EigenVectors', 'T')]\n input_range = [1, 1] # only one input is allowed\n output_range = [1, 2] # 1 or 2 outputs are produced\n is_deprecated = False # obviously not deprecated\n domain = 'onnxcustom' # domain, anything is ok\n operator_name = 'Eig' # operator name\n past_version = {} # empty as it is the first version\n\n def __init__(self, X, eigv=False, op_version=None, **kwargs):\n \"\"\"\n :param X: array or OnnxOperatorMixin\n :param eigv: also produces the eigen vectors\n :param op_version: opset version\n :param kwargs: additional parameters\n \"\"\"\n OnnxOperator.__init__(\n self, X, eigv=eigv, op_version=op_version, **kwargs)\n\n\nprint(OnnxEig('X', eigv=True))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can write the converter and\nthe shape calculator.\n\n### shape calculator\n\nNothing new here.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def live_decorrelate_transformer_shape_calculator(operator):\n op = 
operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n input_dim = operator.inputs[0].type.shape[0]\n output_type = input_type([input_dim, op.nf_])\n operator.outputs[0].type = output_type" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### converter\n\nThe converter is using the class `OnnxEig`. The code\nis longer than previous converters as the computation is\nmore complex too.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def live_decorrelate_transformer_converter(scope, operator, container):\n # shortcuts\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n # We retrieve the unique input.\n X = operator.inputs[0]\n\n # We guess its type. If the operator ingests float (or double),\n # it outputs float (or double).\n proto_dtype = guess_proto_type(X.type)\n dtype = guess_numpy_type(X.type)\n\n # Lines in comment specify the numpy computation\n # the ONNX code implements.\n # mean_ = numpy.mean(X, axis=0, keepdims=True)\n mean = OnnxReduceMean(X, axes=[0], keepdims=1, op_version=opv)\n\n # This is trick I often use. The converter automatically\n # chooses a name for every output. In big graph,\n # it is difficult to know which operator is producing which output.\n # This line just tells every node must prefix its ouputs with this string.\n # It also applies to all inputs nodes unless this method\n # was called for one of these nodes.\n mean.set_onnx_name_prefix('mean')\n\n # X2 = X - mean_\n X2 = OnnxSub(X, mean, op_version=opv)\n\n # V = X2.T @ X2 / X2.shape[0]\n N = OnnxGatherElements(\n OnnxShape(X, op_version=opv),\n numpy.array([0], dtype=numpy.int64),\n op_version=opv)\n Nf = OnnxCast(N, to=proto_dtype, op_version=opv)\n\n # Every output involved in N and Nf is prefixed by 'N'.\n Nf.set_onnx_name_prefix('N')\n\n V = OnnxDiv(\n OnnxMatMul(OnnxTranspose(X2, op_version=opv),\n X2, op_version=opv),\n Nf, op_version=opv)\n V.set_onnx_name_prefix('V1')\n\n # V += numpy.identity(V.shape[0]) * self.alpha\n V = OnnxAdd(V,\n op.alpha * numpy.identity(op.nf_, dtype=dtype),\n op_version=opv)\n V.set_onnx_name_prefix('V2')\n\n # L, P = numpy.linalg.eig(V)\n LP = OnnxEig(V, eigv=True, op_version=opv)\n LP.set_onnx_name_prefix('LP')\n\n # Linv = L ** (-0.5)\n # Notation LP[0] means OnnxPow is taking the first output\n # of operator OnnxEig, LP[1] would mean the second one\n # LP is not allowed as it is ambiguous\n Linv = OnnxPow(LP[0], numpy.array([-0.5], dtype=dtype),\n op_version=opv)\n Linv.set_onnx_name_prefix('Linv')\n\n # diag = numpy.diag(Linv)\n diag = OnnxMul(\n OnnxEyeLike(\n numpy.zeros((op.nf_, op.nf_), dtype=numpy.int64),\n k=0, op_version=opv),\n Linv, op_version=opv)\n diag.set_onnx_name_prefix('diag')\n\n # root = P @ diag @ P.transpose()\n trv = OnnxTranspose(LP[1], op_version=opv)\n coef_left = OnnxMatMul(LP[1], diag, op_version=opv)\n coef_left.set_onnx_name_prefix('coef_left')\n coef = OnnxMatMul(coef_left, trv, op_version=opv)\n coef.set_onnx_name_prefix('coef')\n\n # Same part as before.\n Y = OnnxMatMul(X2, coef, op_version=opv, output_names=out[:1])\n Y.set_onnx_name_prefix('Y')\n\n # The last line specifies the final output.\n # Every node involved in the computation is added to the ONNX\n # graph at this stage.\n Y.add_to(scope, container)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Runtime for Eig\n\nHere comes the new part. 
The python runtime does not\nimplement any runtime for *Eig*. We need to tell the runtime\nto compute eigen values and vectors every time operator *Eig*\nis called. That means implementing two methods,\none to compute, one to infer the shape of the results.\nThe first one is mandatory, the second one can return an\nempty shape if it depends on the inputs. If it is known,\nthe runtime may be able to optimize the computation,\nby reducing allocation for example.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "class OpEig(OpRunCustom):\n\n op_name = 'Eig' # operator name\n atts = {'eigv': True} # operator parameters\n\n def __init__(self, onnx_node, desc=None, **options):\n # constructor, every parameter is added a member\n OpRunCustom.__init__(self, onnx_node, desc=desc,\n expected_attributes=OpEig.atts,\n **options)\n\n def run(self, x, **kwargs):\n # computation\n if self.eigv:\n return numpy.linalg.eig(x)\n return (numpy.linalg.eigvals(x), )\n\n def infer_shapes(self, x):\n # shape inference, if you don't know what to\n # write, just return `ShapeObject(None)`\n if self.eigv:\n return (\n ShapeObject(\n x.shape, dtype=x.dtype,\n name=self.__class__.__name__ + 'Values'),\n ShapeObject(\n x.shape, dtype=x.dtype,\n name=self.__class__.__name__ + 'Vectors'))\n return (ShapeObject(x.shape, dtype=x.dtype,\n name=self.__class__.__name__), )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Registration\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(\n LiveDecorrelateTransformer, \"SklearnLiveDecorrelateTransformer\",\n live_decorrelate_transformer_shape_calculator,\n live_decorrelate_transformer_converter)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Final example\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "data = load_iris()\nX = data.data\n\ndec = LiveDecorrelateTransformer()\ndec.fit(X)\n\nonx = to_onnx(dec, X.astype(numpy.float32))\n\nregister_operator(OpEig, name='Eig', overwrite=False)\n\noinf = OnnxInference(onx)\n\nexp = dec.transform(X.astype(numpy.float32))\ngot = oinf.run({'X': X.astype(numpy.float32)})['variable']\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, got))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It works!\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Final graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf = OnnxInference(onx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + 
"metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Fast design with a python runtime\n\n.. index:: custom python runtime\n\n:epkg:`ONNX operators` do not contain all operators\nfrom :epkg:`numpy`. There is no operator for\n[solve](https://numpy.org/doc/stable/reference/\ngenerated/numpy.linalg.solve.html) but this one\nis needed to implement the prediction function\nof model :epkg:`NMF`. The converter can be written\nincluding a new ONNX operator but then it requires a\nruntime for it to be tested. This example shows how\nto do that with the python runtime implemented in\n:epkg:`mlprodict`. It may not be :epkg:`onnxruntime`\nbut that speeds up the implementation of the converter.\n\nThe example changes the transformer from\n`l-plot-custom-converter`, the method *predict*\ndecorrelates the variables by computing the eigen\nvalues. Method *fit* does not do anything anymore.\n\n## A transformer which decorrelates variables\n\nThis time, the eigen values are not estimated at\ntraining time but at prediction time.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from mlprodict.onnxrt.ops_cpu import OpRunCustom, register_operator\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxAdd,\n OnnxCast,\n OnnxDiv,\n OnnxGatherElements,\n OnnxEyeLike,\n OnnxMatMul,\n OnnxMul,\n OnnxPow,\n OnnxReduceMean_13,\n OnnxShape,\n OnnxSub,\n OnnxTranspose,\n)\nfrom skl2onnx.algebra import OnnxOperator\nfrom mlprodict.onnxrt import OnnxInference\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nimport pickle\nfrom io import BytesIO\nimport numpy\nfrom numpy.testing import assert_almost_equal\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\nfrom skl2onnx.common.data_types import guess_numpy_type, guess_proto_type\nfrom skl2onnx import to_onnx\nfrom skl2onnx import update_registered_converter\n\n\nclass LiveDecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.nf_`: number of expected features\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n if sample_weights is not None:\n raise NotImplementedError(\n \"sample_weights != None is not implemented.\")\n self.nf_ = X.shape[1]\n return self\n\n def transform(self, X):\n mean_ = numpy.mean(X, axis=0, keepdims=True)\n X2 = X - mean_\n V = X2.T @ X2 / X2.shape[0]\n if self.alpha != 0:\n V += numpy.identity(V.shape[0]) * self.alpha\n L, P = numpy.linalg.eig(V)\n Linv = L ** (-0.5)\n diag = numpy.diag(Linv)\n root = P @ diag @ P.transpose()\n coef_ = root\n return (X - mean_) @ coef_\n\n\ndef test_live_decorrelate_transformer():\n data = load_iris()\n X = data.data\n\n dec = LiveDecorrelateTransformer()\n dec.fit(X)\n pred = dec.transform(X)\n cov = pred.T @ pred\n cov /= cov[0, 0]\n assert_almost_equal(numpy.identity(4), cov)\n\n dec = LiveDecorrelateTransformer(alpha=1e-10)\n dec.fit(X)\n pred = dec.transform(X)\n cov = pred.T @ pred\n cov /= cov[0, 0]\n assert_almost_equal(numpy.identity(4), cov)\n\n st = BytesIO()\n pickle.dump(dec, st)\n dec2 = pickle.load(BytesIO(st.getvalue()))\n 
assert_almost_equal(dec.transform(X), dec2.transform(X))\n\n\ntest_live_decorrelate_transformer()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Everything works as expected.\n\n## Extend ONNX\n\nThe conversion requires one operator to compute\nthe eigen values and vectors. The list of\n:epkg:`ONNX operators` does not contain anything\nwhich produces eigen values. It does not seem\nefficient to implement an algorithm with existing\nONNX operators to find eigen values.\nA new operator must be\nadded, we give it the same name *Eig* as in :epkg:`numpy`.\nIt would take a matrix and would produce one or two outputs,\nthe eigen values and the eigen vectors.\nJust for the exercise, a parameter specifies\nto output the eigen vectors as a second output.\n\n### New ONNX operator\n\nAny unknown operator can be\nadded to an ONNX graph. Operators are grouped by domain,\n`''` or `ai.onnx` refers to matrix computation.\n`ai.onnx.ml` refers to usual machine learning models.\nNew domains are officially supported by :epkg:`onnx` package.\nWe want to create a new operator `Eig` of domain `onnxcustom`.\nIt must be declared in a class, then a converter can use it.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "class OnnxEig(OnnxOperator):\n \"\"\"\n Defines a custom operator not defined by ONNX\n specifications but in onnxruntime.\n \"\"\"\n\n since_version = 1 # last changed in this version\n expected_inputs = [('X', 'T')] # input names and types\n expected_outputs = [('EigenValues', 'T'), # output names and types\n ('EigenVectors', 'T')]\n input_range = [1, 1] # only one input is allowed\n output_range = [1, 2] # 1 or 2 outputs are produced\n is_deprecated = False # obviously not deprecated\n domain = 'onnxcustom' # domain, anything is ok\n operator_name = 'Eig' # operator name\n past_version = {} # empty as it is the first version\n\n def __init__(self, X, eigv=False, op_version=None, **kwargs):\n \"\"\"\n :param X: array or OnnxOperatorMixin\n :param eigv: also produces the eigen vectors\n :param op_version: opset version\n :param kwargs: additional parameters\n \"\"\"\n OnnxOperator.__init__(\n self, X, eigv=eigv, op_version=op_version, **kwargs)\n\n\nprint(OnnxEig('X', eigv=True))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can write the converter and\nthe shape calculator.\n\n### shape calculator\n\nNothing new here.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def live_decorrelate_transformer_shape_calculator(operator):\n op = operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n input_dim = operator.inputs[0].type.shape[0]\n output_type = input_type([input_dim, op.nf_])\n operator.outputs[0].type = output_type" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### converter\n\nThe converter is using the class `OnnxEig`. The code\nis longer than previous converters as the computation is\nmore complex too.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def live_decorrelate_transformer_converter(scope, operator, container):\n # shortcuts\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n # We retrieve the unique input.\n X = operator.inputs[0]\n\n # We guess its type. 
If the operator ingests float (or double),\n # it outputs float (or double).\n proto_dtype = guess_proto_type(X.type)\n dtype = guess_numpy_type(X.type)\n\n # Lines in comment specify the numpy computation\n # the ONNX code implements.\n # mean_ = numpy.mean(X, axis=0, keepdims=True)\n mean = OnnxReduceMean_13(X, axes=[0], keepdims=1, op_version=opv)\n\n # This is trick I often use. The converter automatically\n # chooses a name for every output. In big graph,\n # it is difficult to know which operator is producing which output.\n # This line just tells every node must prefix its ouputs with this string.\n # It also applies to all inputs nodes unless this method\n # was called for one of these nodes.\n mean.set_onnx_name_prefix('mean')\n\n # X2 = X - mean_\n X2 = OnnxSub(X, mean, op_version=opv)\n\n # V = X2.T @ X2 / X2.shape[0]\n N = OnnxGatherElements(\n OnnxShape(X, op_version=opv),\n numpy.array([0], dtype=numpy.int64),\n op_version=opv)\n Nf = OnnxCast(N, to=proto_dtype, op_version=opv)\n\n # Every output involved in N and Nf is prefixed by 'N'.\n Nf.set_onnx_name_prefix('N')\n\n V = OnnxDiv(\n OnnxMatMul(OnnxTranspose(X2, op_version=opv),\n X2, op_version=opv),\n Nf, op_version=opv)\n V.set_onnx_name_prefix('V1')\n\n # V += numpy.identity(V.shape[0]) * self.alpha\n V = OnnxAdd(V,\n op.alpha * numpy.identity(op.nf_, dtype=dtype),\n op_version=opv)\n V.set_onnx_name_prefix('V2')\n\n # L, P = numpy.linalg.eig(V)\n LP = OnnxEig(V, eigv=True, op_version=opv)\n LP.set_onnx_name_prefix('LP')\n\n # Linv = L ** (-0.5)\n # Notation LP[0] means OnnxPow is taking the first output\n # of operator OnnxEig, LP[1] would mean the second one\n # LP is not allowed as it is ambiguous\n Linv = OnnxPow(LP[0], numpy.array([-0.5], dtype=dtype),\n op_version=opv)\n Linv.set_onnx_name_prefix('Linv')\n\n # diag = numpy.diag(Linv)\n diag = OnnxMul(\n OnnxEyeLike(\n numpy.zeros((op.nf_, op.nf_), dtype=numpy.int64),\n k=0, op_version=opv),\n Linv, op_version=opv)\n diag.set_onnx_name_prefix('diag')\n\n # root = P @ diag @ P.transpose()\n trv = OnnxTranspose(LP[1], op_version=opv)\n coef_left = OnnxMatMul(LP[1], diag, op_version=opv)\n coef_left.set_onnx_name_prefix('coef_left')\n coef = OnnxMatMul(coef_left, trv, op_version=opv)\n coef.set_onnx_name_prefix('coef')\n\n # Same part as before.\n Y = OnnxMatMul(X2, coef, op_version=opv, output_names=out[:1])\n Y.set_onnx_name_prefix('Y')\n\n # The last line specifies the final output.\n # Every node involved in the computation is added to the ONNX\n # graph at this stage.\n Y.add_to(scope, container)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Runtime for Eig\n\nHere comes the new part. The python runtime does not\nimplement any runtime for *Eig*. We need to tell the runtime\nto compute eigen values and vectors every time operator *Eig*\nis called. That means implementing two methods,\none to compute, one to infer the shape of the results.\nThe first one is mandatory, the second one can return an\nempty shape if it depends on the inputs. 
If it is known,\nthe runtime may be able to optimize the computation,\nby reducing allocation for example.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "class OpEig(OpRunCustom):\n\n op_name = 'Eig' # operator name\n atts = {'eigv': True} # operator parameters\n\n def __init__(self, onnx_node, desc=None, **options):\n # constructor, every parameter is added a member\n OpRunCustom.__init__(self, onnx_node, desc=desc,\n expected_attributes=OpEig.atts,\n **options)\n\n def run(self, x, **kwargs):\n # computation\n if self.eigv:\n return numpy.linalg.eig(x)\n return (numpy.linalg.eigvals(x), )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Registration\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(\n LiveDecorrelateTransformer, \"SklearnLiveDecorrelateTransformer\",\n live_decorrelate_transformer_shape_calculator,\n live_decorrelate_transformer_converter)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final example\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "data = load_iris()\nX = data.data\n\ndec = LiveDecorrelateTransformer()\ndec.fit(X)\n\nonx = to_onnx(dec, X.astype(numpy.float32), target_opset=17)\n\nregister_operator(OpEig, name='Eig', overwrite=False)\n\noinf = OnnxInference(onx)\n\nexp = dec.transform(X.astype(numpy.float32))\ngot = oinf.run({'X': X.astype(numpy.float32)})['variable']\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, got))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It works!\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(onx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/20449c1b6f8e4eaf23ae55c0a9cde983/plot_dbegin_options_zipmap.ipynb b/_downloads/20449c1b6f8e4eaf23ae55c0a9cde983/plot_dbegin_options_zipmap.ipynb index b8e6c9208..177966dee 100644 --- a/_downloads/20449c1b6f8e4eaf23ae55c0a9cde983/plot_dbegin_options_zipmap.ipynb +++ b/_downloads/20449c1b6f8e4eaf23ae55c0a9cde983/plot_dbegin_options_zipmap.ipynb @@ -226,7 +226,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.10.6" } }, "nbformat": 4, diff --git a/_downloads/272208917bba762fcf08b08724bdc35c/plot_kcustom_converter_wrapper.ipynb b/_downloads/272208917bba762fcf08b08724bdc35c/plot_kcustom_converter_wrapper.ipynb index 5ad8c62d0..a3e8cd557 100644 --- 
a/_downloads/272208917bba762fcf08b08724bdc35c/plot_kcustom_converter_wrapper.ipynb +++ b/_downloads/272208917bba762fcf08b08724bdc35c/plot_kcustom_converter_wrapper.ipynb @@ -1,169 +1,169 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Implement a new converter using other converters\n\n.. index:: custom converter\n\nIn many cases, a custom models leverages existing models\nwhich already have an associated converter. To convert this\npatchwork, existing converters must be called. This example\nshows how to do that. Example `l-plot-custom-converter`\ncan be rewritten by using a `PCA `_.\nWe could then reuse the converter associated to this model.\n\n## Custom model\n\nLet's implement a simple custom model using\n:epkg:`scikit-learn` API. The model is preprocessing\nwhich decorrelates correlated random variables.\nIf *X* is a matrix of features, $V=\\frac{1}{n}X'X$\nis the covariance matrix. We compute $X V^{1/2}$.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from mlprodict.onnxrt import OnnxInference\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nimport pickle\nfrom io import BytesIO\nimport numpy\nfrom numpy.testing import assert_almost_equal\nfrom onnxruntime import InferenceSession\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\nfrom sklearn.decomposition import PCA\nfrom skl2onnx import update_registered_converter\nfrom skl2onnx.algebra.onnx_operator import OnnxSubEstimator\nfrom skl2onnx import to_onnx\n\n\nclass DecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.mean_`: average\n * `self.coef_`: square root of the coveriance matrix\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n self.pca_ = PCA(X.shape[1])\n self.pca_.fit(X)\n return self\n\n def transform(self, X):\n return self.pca_.transform(X)\n\n\ndef test_decorrelate_transformer():\n data = load_iris()\n X = data.data\n\n dec = DecorrelateTransformer()\n dec.fit(X)\n pred = dec.transform(X)\n cov = pred.T @ pred\n for i in range(cov.shape[0]):\n cov[i, i] = 1.\n assert_almost_equal(numpy.identity(4), cov)\n\n st = BytesIO()\n pickle.dump(dec, st)\n dec2 = pickle.load(BytesIO(st.getvalue()))\n assert_almost_equal(dec.transform(X), dec2.transform(X))\n\n\ntest_decorrelate_transformer()\n\ndata = load_iris()\nX = data.data\n\ndec = DecorrelateTransformer()\ndec.fit(X)\npred = dec.transform(X[:5])\nprint(pred)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion into ONNX\n\nLet's try to convert it and see what happens.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n to_onnx(dec, X.astype(numpy.float32))\nexcept Exception as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This error means there is no converter associated\nto *DecorrelateTransformer*. 
Let's do it.\nIt requires to implement the two following\nfunctions, a shape calculator and a converter\nwith the same signature as below.\nFirst the shape calculator. We retrieve the input type\nadd tells the output type has the same type,\nthe same number of rows and a specific number of columns.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def decorrelate_transformer_shape_calculator(operator):\n op = operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n input_dim = operator.inputs[0].type.shape[0]\n output_type = input_type([input_dim, op.pca_.components_.shape[1]])\n operator.outputs[0].type = output_type" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The converter. One thing we need to pay attention to\nis the target opset. This information is important\nto make sure that every node is defined following the\nspecifications of that opset.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def decorrelate_transformer_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n # We retrieve the unique input.\n X = operator.inputs[0]\n\n # We tell in ONNX language how to compute the unique output.\n # op_version=opv tells which opset is requested\n Y = OnnxSubEstimator(op.pca_, X, op_version=opv, output_names=out[:1])\n Y.add_to(scope, container)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We need to let *skl2onnx* know about the new converter.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(\n DecorrelateTransformer, \"SklearnDecorrelateTransformer\",\n decorrelate_transformer_shape_calculator,\n decorrelate_transformer_converter)\n\n\nonx = to_onnx(dec, X.astype(numpy.float32))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\ngot = sess.run(None, {'X': X.astype(numpy.float32)})[0]\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, got))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's check it works as well with double.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx = to_onnx(dec, X.astype(numpy.float64))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float64))\ngot = sess.run(None, {'X': X.astype(numpy.float64)})[0]\nprint(diff(exp, got))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The differences are smaller with double as expected.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Final graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf = OnnxInference(onx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": 
"ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Implement a new converter using other converters\n\n.. index:: custom converter\n\nIn many cases, a custom models leverages existing models\nwhich already have an associated converter. To convert this\npatchwork, existing converters must be called. This example\nshows how to do that. Example `l-plot-custom-converter`\ncan be rewritten by using a [PCA](https://scikit-learn.org/\nstable/modules/generated/sklearn.decomposition.PCA.html).\nWe could then reuse the converter associated to this model.\n\n## Custom model\n\nLet's implement a simple custom model using\n:epkg:`scikit-learn` API. The model is preprocessing\nwhich decorrelates correlated random variables.\nIf *X* is a matrix of features, $V=\\frac{1}{n}X'X$\nis the covariance matrix. We compute $X V^{1/2}$.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from mlprodict.onnxrt import OnnxInference\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nimport pickle\nfrom io import BytesIO\nimport numpy\nfrom numpy.testing import assert_almost_equal\nfrom onnxruntime import InferenceSession\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\nfrom sklearn.decomposition import PCA\nfrom skl2onnx import update_registered_converter\nfrom skl2onnx.algebra.onnx_operator import OnnxSubEstimator\nfrom skl2onnx import to_onnx\n\n\nclass DecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.mean_`: average\n * `self.coef_`: square root of the coveriance matrix\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n self.pca_ = PCA(X.shape[1])\n self.pca_.fit(X)\n return self\n\n def transform(self, X):\n return self.pca_.transform(X)\n\n\ndef test_decorrelate_transformer():\n data = load_iris()\n X = data.data\n\n dec = DecorrelateTransformer()\n dec.fit(X)\n pred = dec.transform(X)\n cov = pred.T @ pred\n for i in range(cov.shape[0]):\n cov[i, i] = 1.\n assert_almost_equal(numpy.identity(4), cov)\n\n st = BytesIO()\n pickle.dump(dec, st)\n dec2 = pickle.load(BytesIO(st.getvalue()))\n assert_almost_equal(dec.transform(X), dec2.transform(X))\n\n\ntest_decorrelate_transformer()\n\ndata = load_iris()\nX = data.data\n\ndec = DecorrelateTransformer()\ndec.fit(X)\npred = dec.transform(X[:5])\nprint(pred)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion into ONNX\n\nLet's try to convert it and see what happens.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n to_onnx(dec, X.astype(numpy.float32))\nexcept Exception as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + 
"source": [ + "This error means there is no converter associated\nto *DecorrelateTransformer*. Let's do it.\nIt requires to implement the two following\nfunctions, a shape calculator and a converter\nwith the same signature as below.\nFirst the shape calculator. We retrieve the input type\nadd tells the output type has the same type,\nthe same number of rows and a specific number of columns.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def decorrelate_transformer_shape_calculator(operator):\n op = operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n input_dim = operator.inputs[0].type.shape[0]\n output_type = input_type([input_dim, op.pca_.components_.shape[1]])\n operator.outputs[0].type = output_type" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The converter. One thing we need to pay attention to\nis the target opset. This information is important\nto make sure that every node is defined following the\nspecifications of that opset.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def decorrelate_transformer_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n # We retrieve the unique input.\n X = operator.inputs[0]\n\n # We tell in ONNX language how to compute the unique output.\n # op_version=opv tells which opset is requested\n Y = OnnxSubEstimator(op.pca_, X, op_version=opv, output_names=out[:1])\n Y.add_to(scope, container)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We need to let *skl2onnx* know about the new converter.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(\n DecorrelateTransformer, \"SklearnDecorrelateTransformer\",\n decorrelate_transformer_shape_calculator,\n decorrelate_transformer_converter)\n\n\nonx = to_onnx(dec, X.astype(numpy.float32))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\ngot = sess.run(None, {'X': X.astype(numpy.float32)})[0]\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, got))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's check it works as well with double.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(dec, X.astype(numpy.float64))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float64))\ngot = sess.run(None, {'X': X.astype(numpy.float64)})[0]\nprint(diff(exp, got))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The differences are smaller with double as expected.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(onx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + 
"language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/2804cd11b5af37d980f2d88395ca6d43/plot_usparse_xgboost.py b/_downloads/2804cd11b5af37d980f2d88395ca6d43/plot_usparse_xgboost.py index bc3fd6cdb..92521f95d 100644 --- a/_downloads/2804cd11b5af37d980f2d88395ca6d43/plot_usparse_xgboost.py +++ b/_downloads/2804cd11b5af37d980f2d88395ca6d43/plot_usparse_xgboost.py @@ -17,9 +17,6 @@ do the difference, this ambiguity may introduces discrepencies when converter into ONNX. This example looks into several configurations. -.. contents:: - :local: - Imports, setups +++++++++++++++ @@ -77,6 +74,7 @@ y = data.target df = pandas.DataFrame(X) +df.columns = [f"c{c}" for c in df.columns] df["text"] = [cst[i] for i in y] @@ -146,7 +144,8 @@ def make_pipelines(df_train, y_train, models=None, try: pipe.fit(df_train, y_train) except TypeError as e: - obs = dict(model=model.__name__, pipe=pipe, error=e) + obs = dict(model=model.__name__, pipe=pipe, error=e, + model_onnx=None) pipes.append(obs) continue @@ -168,7 +167,7 @@ def make_pipelines(df_train, y_train, models=None, f.write(model_onnx.SerializeToString()) sess = rt.InferenceSession(model_onnx.SerializeToString()) - inputs = {"input": df[[0, 1]].values.astype(numpy.float32), + inputs = {"input": df[["c0", "c1"]].values.astype(numpy.float32), "text": df[["text"]].values} pred_onx = sess.run(None, inputs) diff --git a/_downloads/2ac8700b99079ce591dca009c1107fd2/plot_lcustom_options.py b/_downloads/2ac8700b99079ce591dca009c1107fd2/plot_lcustom_options.py index 05c833251..a3a538294 100644 --- a/_downloads/2ac8700b99079ce591dca009c1107fd2/plot_lcustom_options.py +++ b/_downloads/2ac8700b99079ce591dca009c1107fd2/plot_lcustom_options.py @@ -18,9 +18,6 @@ which uses operator *MatMul*. Option *use_gemm* is used to replace *MatMul* by *Gemm*. -.. contents:: - :local: - Custom model ++++++++++++ diff --git a/_downloads/2bd838cde0591a9be1c6c1430ef2b8c8/plot_gexternal_xgboost.ipynb b/_downloads/2bd838cde0591a9be1c6c1430ef2b8c8/plot_gexternal_xgboost.ipynb index 932d7d936..1275cb0a0 100644 --- a/_downloads/2bd838cde0591a9be1c6c1430ef2b8c8/plot_gexternal_xgboost.ipynb +++ b/_downloads/2bd838cde0591a9be1c6c1430ef2b8c8/plot_gexternal_xgboost.ipynb @@ -1,205 +1,205 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Convert a pipeline with a XGBoost model\n\n.. index:: XGBoost\n\n:epkg:`sklearn-onnx` only converts :epkg:`scikit-learn` models\ninto :epkg:`ONNX` but many libraries implement :epkg:`scikit-learn`\nAPI so that their models can be included in a :epkg:`scikit-learn`\npipeline. This example considers a pipeline including a :epkg:`XGBoost`\nmodel. :epkg:`sklearn-onnx` can convert the whole pipeline as long as\nit knows the converter associated to a *XGBClassifier*. 
Let's see\nhow to do it.\n\n## Train a XGBoost classifier\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom mlprodict.onnxrt import OnnxInference\nimport numpy\nimport onnxruntime as rt\nfrom sklearn.datasets import load_iris, load_diabetes, make_classification\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom xgboost import XGBClassifier, XGBRegressor, DMatrix, train as train_xgb\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn, to_onnx, update_registered_converter\nfrom skl2onnx.common.shape_calculator import (\n calculate_linear_classifier_output_shapes,\n calculate_linear_regressor_output_shapes)\nfrom onnxmltools.convert.xgboost.operator_converters.XGBoost import (\n convert_xgboost)\nfrom onnxmltools.convert import convert_xgboost as convert_xgboost_booster\n\n\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\n\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()\n\npipe = Pipeline([('scaler', StandardScaler()),\n ('xgb', XGBClassifier(n_estimators=3))])\npipe.fit(X, y)\n\n# The conversion fails but it is expected.\n\ntry:\n convert_sklearn(pipe, 'pipeline_xgboost',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\nexcept Exception as e:\n print(e)\n\n# The error message tells no converter was found\n# for :epkg:`XGBoost` models. By default, :epkg:`sklearn-onnx`\n# only handles models from :epkg:`scikit-learn` but it can\n# be extended to every model following :epkg:`scikit-learn`\n# API as long as the module knows there exists a converter\n# for every model used in a pipeline. That's why\n# we need to register a converter." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Register the converter for XGBClassifier\n\nThe converter is implemented in :epkg:`onnxmltools`:\n`onnxmltools...XGBoost.py\n`_.\nand the shape calculator:\n`onnxmltools...Classifier.py\n`_.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(\n XGBClassifier, 'XGBoostXGBClassifier',\n calculate_linear_classifier_output_shapes, convert_xgboost,\n options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Convert again\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = convert_sklearn(\n pipe, 'pipeline_xgboost',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\n\n# And save.\nwith open(\"pipeline_xgboost.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Compare the predictions\n\nPredictions with XGBoost.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"predict\", pipe.predict(X[:5]))\nprint(\"predict_proba\", pipe.predict_proba(X[:1]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Predictions with onnxruntime.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = rt.InferenceSession(\"pipeline_xgboost.onnx\")\npred_onx = sess.run(None, {\"input\": X[:5].astype(numpy.float32)})\nprint(\"predict\", pred_onx[0])\nprint(\"predict_proba\", pred_onx[1][:1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Final graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf = OnnxInference(model_onnx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Same example with XGBRegressor\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(\n XGBRegressor, 'XGBoostXGBRegressor',\n calculate_linear_regressor_output_shapes, convert_xgboost)\n\n\ndata = load_diabetes()\nx = data.data\ny = data.target\nX_train, X_test, y_train, _ = train_test_split(x, y, test_size=0.5)\n\npipe = Pipeline([('scaler', StandardScaler()),\n ('xgb', XGBRegressor(n_estimators=3))])\npipe.fit(X_train, y_train)\n\nprint(\"predict\", pipe.predict(X_test[:5]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "ONNX\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx = to_onnx(pipe, X_train.astype(numpy.float32),\n target_opset={'': 12, 'ai.onnx.ml': 2})\n\nsess = rt.InferenceSession(onx.SerializeToString())\npred_onx = sess.run(None, {\"X\": X_test[:5].astype(numpy.float32)})\nprint(\"predict\", pred_onx[0].ravel())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Some discrepencies 
may appear. In that case,\nyou should read `l-example-discrepencies-float-double`.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Same with a Booster\n\nA booster cannot be inserted in a pipeline. It requires\na different conversion function because it does not\nfollow :epkg:`scikit-learn` API.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "x, y = make_classification(n_classes=2, n_features=5,\n n_samples=100,\n random_state=42, n_informative=3)\nX_train, X_test, y_train, _ = train_test_split(x, y, test_size=0.5,\n random_state=42)\n\ndtrain = DMatrix(X_train, label=y_train)\n\nparam = {'objective': 'multi:softmax', 'num_class': 3}\nbst = train_xgb(param, dtrain, 10)\n\ninitial_type = [('float_input', FloatTensorType([None, X_train.shape[1]]))]\n\ntry:\n onx = convert_xgboost_booster(bst, \"name\", initial_types=initial_type)\n cont = True\nexcept AssertionError as e:\n print(\"XGBoost is too recent or onnxmltools too old.\", e)\n cont = False\n\nif cont:\n sess = rt.InferenceSession(onx.SerializeToString())\n input_name = sess.get_inputs()[0].name\n label_name = sess.get_outputs()[0].name\n pred_onx = sess.run(\n [label_name], {input_name: X_test.astype(numpy.float32)})[0]\n print(pred_onx)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Convert a pipeline with a XGBoost model\n\n.. index:: XGBoost\n\n:epkg:`sklearn-onnx` only converts :epkg:`scikit-learn` models\ninto :epkg:`ONNX` but many libraries implement :epkg:`scikit-learn`\nAPI so that their models can be included in a :epkg:`scikit-learn`\npipeline. This example considers a pipeline including a :epkg:`XGBoost`\nmodel. :epkg:`sklearn-onnx` can convert the whole pipeline as long as\nit knows the converter associated to a *XGBClassifier*. 
Let's see\nhow to do it.\n\n## Train a XGBoost classifier\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom mlprodict.onnxrt import OnnxInference\nimport numpy\nimport onnxruntime as rt\nfrom sklearn.datasets import load_iris, load_diabetes, make_classification\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom xgboost import XGBClassifier, XGBRegressor, DMatrix, train as train_xgb\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn, to_onnx, update_registered_converter\nfrom skl2onnx.common.shape_calculator import (\n calculate_linear_classifier_output_shapes,\n calculate_linear_regressor_output_shapes)\nfrom onnxmltools.convert.xgboost.operator_converters.XGBoost import (\n convert_xgboost)\nfrom onnxmltools.convert import convert_xgboost as convert_xgboost_booster\n\n\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\n\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()\n\npipe = Pipeline([('scaler', StandardScaler()),\n ('xgb', XGBClassifier(n_estimators=3))])\npipe.fit(X, y)\n\n# The conversion fails but it is expected.\n\ntry:\n convert_sklearn(pipe, 'pipeline_xgboost',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\nexcept Exception as e:\n print(e)\n\n# The error message tells no converter was found\n# for :epkg:`XGBoost` models. By default, :epkg:`sklearn-onnx`\n# only handles models from :epkg:`scikit-learn` but it can\n# be extended to every model following :epkg:`scikit-learn`\n# API as long as the module knows there exists a converter\n# for every model used in a pipeline. That's why\n# we need to register a converter." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Register the converter for XGBClassifier\n\nThe converter is implemented in :epkg:`onnxmltools`:\n[onnxmltools...XGBoost.py](https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/\nxgboost/operator_converters/XGBoost.py).\nand the shape calculator:\n[onnxmltools...Classifier.py](https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/\nxgboost/shape_calculators/Classifier.py).\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(\n XGBClassifier, 'XGBoostXGBClassifier',\n calculate_linear_classifier_output_shapes, convert_xgboost,\n options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert again\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = convert_sklearn(\n pipe, 'pipeline_xgboost',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\n\n# And save.\nwith open(\"pipeline_xgboost.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Compare the predictions\n\nPredictions with XGBoost.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"predict\", pipe.predict(X[:5]))\nprint(\"predict_proba\", pipe.predict_proba(X[:1]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Predictions with onnxruntime.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = rt.InferenceSession(\"pipeline_xgboost.onnx\")\npred_onx = sess.run(None, {\"input\": X[:5].astype(numpy.float32)})\nprint(\"predict\", pred_onx[0])\nprint(\"predict_proba\", pred_onx[1][:1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(model_onnx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Same example with XGBRegressor\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(\n XGBRegressor, 'XGBoostXGBRegressor',\n calculate_linear_regressor_output_shapes, convert_xgboost)\n\n\ndata = load_diabetes()\nx = data.data\ny = data.target\nX_train, X_test, y_train, _ = train_test_split(x, y, test_size=0.5)\n\npipe = Pipeline([('scaler', StandardScaler()),\n ('xgb', XGBRegressor(n_estimators=3))])\npipe.fit(X_train, y_train)\n\nprint(\"predict\", pipe.predict(X_test[:5]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "ONNX\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(pipe, X_train.astype(numpy.float32),\n target_opset={'': 12, 'ai.onnx.ml': 2})\n\nsess = 
rt.InferenceSession(onx.SerializeToString())\npred_onx = sess.run(None, {\"X\": X_test[:5].astype(numpy.float32)})\nprint(\"predict\", pred_onx[0].ravel())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Some discrepencies may appear. In that case,\nyou should read `l-example-discrepencies-float-double`.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Same with a Booster\n\nA booster cannot be inserted in a pipeline. It requires\na different conversion function because it does not\nfollow :epkg:`scikit-learn` API.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "x, y = make_classification(n_classes=2, n_features=5,\n n_samples=100,\n random_state=42, n_informative=3)\nX_train, X_test, y_train, _ = train_test_split(x, y, test_size=0.5,\n random_state=42)\n\ndtrain = DMatrix(X_train, label=y_train)\n\nparam = {'objective': 'multi:softmax', 'num_class': 3}\nbst = train_xgb(param, dtrain, 10)\n\ninitial_type = [('float_input', FloatTensorType([None, X_train.shape[1]]))]\n\ntry:\n onx = convert_xgboost_booster(bst, \"name\", initial_types=initial_type)\n cont = True\nexcept AssertionError as e:\n print(\"XGBoost is too recent or onnxmltools too old.\", e)\n cont = False\n\nif cont:\n sess = rt.InferenceSession(onx.SerializeToString())\n input_name = sess.get_inputs()[0].name\n label_name = sess.get_outputs()[0].name\n pred_onx = sess.run(\n [label_name], {input_name: X_test.astype(numpy.float32)})[0]\n print(pred_onx)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/2d68cfc9bce77e2cf4c3d3101c9419be/plot_woe_transformer.py b/_downloads/2d68cfc9bce77e2cf4c3d3101c9419be/plot_woe_transformer.py index d83cef1bf..bf466eef1 100644 --- a/_downloads/2d68cfc9bce77e2cf4c3d3101c9419be/plot_woe_transformer.py +++ b/_downloads/2d68cfc9bce77e2cf4c3d3101c9419be/plot_woe_transformer.py @@ -12,9 +12,6 @@ .. index:: WOE, WOETransformer -.. contents:: - :local: - A simple example ++++++++++++++++ diff --git a/_downloads/2dbd202de70c8b6a394b8a1c7c1e12b8/plot_pipeline.ipynb b/_downloads/2dbd202de70c8b6a394b8a1c7c1e12b8/plot_pipeline.ipynb index 3f6cd6f86..83b728adc 100644 --- a/_downloads/2dbd202de70c8b6a394b8a1c7c1e12b8/plot_pipeline.ipynb +++ b/_downloads/2dbd202de70c8b6a394b8a1c7c1e12b8/plot_pipeline.ipynb @@ -1,144 +1,144 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Draw a pipeline\n\nThere is no other way to look into one model stored\nin ONNX format than looking into its node with\n*onnx*. 
This example demonstrates\nhow to draw a model and to retrieve it in *json*\nformat.\n\n## Retrieve a model in JSON format\n\nThat's the most simple way.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import skl2onnx\nimport onnxruntime\nimport sklearn\nimport numpy\nimport matplotlib.pyplot as plt\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nfrom onnx import ModelProto\nimport onnx\nfrom skl2onnx.algebra.onnx_ops import OnnxAdd, OnnxMul\n\nonnx_fct = OnnxAdd(\n OnnxMul('X', numpy.array([2], dtype=numpy.float32),\n op_version=12),\n numpy.array([[1, 0], [0, 1]], dtype=numpy.float32),\n output_names=['Y'], op_version=12)\n\nX = numpy.array([[4, 5], [-2, 3]], dtype=numpy.float32)\nmodel = onnx_fct.to_onnx({'X': X}, target_opset=12)\nprint(model)\n\nfilename = \"example1.onnx\"\nwith open(filename, \"wb\") as f:\n f.write(model.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Draw a model with ONNX\nWe use `net_drawer.py\n`_\nincluded in *onnx* package.\nWe use *onnx* to load the model\nin a different way than before.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model = ModelProto()\nwith open(filename, 'rb') as fid:\n content = fid.read()\n model.ParseFromString(content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We convert it into a graph.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(model.graph, name=model.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\"))\npydot_graph.write_dot(\"graph.dot\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then into an image\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "os.system('dot -O -Tpng graph.dot')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Which we display...\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "image = plt.imread(\"graph.dot.png\")\nplt.imshow(image)\nplt.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Draw a pipeline\n\nThere is no other way to look into one model stored\nin ONNX format than looking into its node with\n*onnx*. This example demonstrates\nhow to draw a model and to retrieve it in *json*\nformat.\n\n## Retrieve a model in JSON format\n\nThat's the most simple way.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import skl2onnx\nimport onnxruntime\nimport sklearn\nimport numpy\nimport matplotlib.pyplot as plt\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nfrom onnx import ModelProto\nimport onnx\nfrom skl2onnx.algebra.onnx_ops import OnnxAdd, OnnxMul\n\nonnx_fct = OnnxAdd(\n OnnxMul('X', numpy.array([2], dtype=numpy.float32),\n op_version=12),\n numpy.array([[1, 0], [0, 1]], dtype=numpy.float32),\n output_names=['Y'], op_version=12)\n\nX = numpy.array([[4, 5], [-2, 3]], dtype=numpy.float32)\nmodel = onnx_fct.to_onnx({'X': X}, target_opset=12)\nprint(model)\n\nfilename = \"example1.onnx\"\nwith open(filename, \"wb\") as f:\n f.write(model.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Draw a model with ONNX\nWe use [net_drawer.py](https://github.com/onnx/onnx/blob/master/onnx/tools/net_drawer.py)\nincluded in *onnx* package.\nWe use *onnx* to load the model\nin a different way than before.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model = ModelProto()\nwith open(filename, 'rb') as fid:\n content = fid.read()\n model.ParseFromString(content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We convert it into a graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(model.graph, name=model.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\"))\npydot_graph.write_dot(\"graph.dot\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then into an image\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "os.system('dot -O -Tpng graph.dot')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Which we display...\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "image = plt.imread(\"graph.dot.png\")\nplt.imshow(image)\nplt.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": 
"ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/2ef4cf14bc133885eb440627912739b6/plot_transformer_discrepancy.py b/_downloads/2ef4cf14bc133885eb440627912739b6/plot_transformer_discrepancy.py new file mode 100644 index 000000000..5adb28646 --- /dev/null +++ b/_downloads/2ef4cf14bc133885eb440627912739b6/plot_transformer_discrepancy.py @@ -0,0 +1,112 @@ +""" +.. _example-transform-discrepancy: + +Dealing with discrepancies (tf-idf) +=================================== + +.. index:: td-idf + +`TfidfVectorizer `_ +is one transform for which the corresponding converted onnx model +may produce different results. The larger the vocabulary is, +the higher the probability to get different result is. +This example proposes a equivalent model with no discrepancies. + +Imports, setups ++++++++++++++++ + +All imports. It also registered onnx converters for :epgk:`xgboost` +and :epkg:`lightgbm`. +""" +import pprint +import numpy +from sklearn.pipeline import Pipeline +from sklearn.compose import ColumnTransformer +from sklearn.feature_extraction.text import TfidfVectorizer +from onnxruntime import InferenceSession +from skl2onnx import to_onnx + + +def print_sparse_matrix(m): + nonan = numpy.nan_to_num(m) + mi, ma = nonan.min(), nonan.max() + if mi == ma: + ma += 1 + mat = numpy.empty(m.shape, dtype=numpy.str_) + mat[:, :] = '.' + if hasattr(m, 'todense'): + dense = m.todense() + else: + dense = m + for i in range(m.shape[0]): + for j in range(m.shape[1]): + if dense[i, j] > 0: + c = int((dense[i, j] - mi) / (ma - mi) * 25) + mat[i, j] = chr(ord('A') + c) + return '\n'.join(''.join(line) for line in mat) + + +def diff(a, b): + if a.shape != b.shape: + raise ValueError( + f"Cannot compare matrices with different shapes " + f"{a.shape} != {b.shape}.") + d = numpy.abs(a - b).sum() / a.size + return d + +########################################## +# Artificial datasets +# +++++++++++++++++++ +# +# Iris + a text column. + + +strings = numpy.array([ + "This a sentence.", + "This a sentence with more characters $^*&'(-...", + """var = ClassName(var2, user=mail@anywhere.com, pwd""" + """=")_~-('&]@^\\`|[{#")""", + "c79857654", + "https://complex-url.com/;76543u3456?g=hhh&h=23", + "01-03-05T11:12:13", + "https://complex-url.com/;dd76543u3456?g=ddhhh&h=23", +]).reshape((-1, 1)) + +pprint.pprint(strings) + +############################################ +# Fit a TfIdfVectorizer +# +++++++++++++++++++++ + +tfidf = Pipeline([ + ('pre', ColumnTransformer([ + ('tfidf', TfidfVectorizer(), 0) + ])) +]) + +############################# +# We leave a couple of strings out of the training set. 
+ +tfidf.fit(strings[:-2]) +tr = tfidf.transform(strings) +tfidf_step = tfidf.steps[0][1].transformers_[0][1] +# print(f"output columns: {tfidf_step.get_feature_names_out()}") +print("rendered outputs") +print(print_sparse_matrix(tr)) + +############################################# +# Conversion to ONNX +# ++++++++++++++++++ + +onx = to_onnx(tfidf, strings) + + +############################################ +# Execution with ONNX +# +++++++++++++++++++ + +sess = InferenceSession(onx.SerializeToString()) +got = sess.run(None, {'X': strings})[0] +print(f"differences={diff(tr, got):g}") +print(print_sparse_matrix(got)) diff --git a/_downloads/2f8d04b47600766e780eb9c1b5b5a68d/plot_pextend_python_runtime.py b/_downloads/2f8d04b47600766e780eb9c1b5b5a68d/plot_pextend_python_runtime.py index 18dc0eb1c..022415e28 100644 --- a/_downloads/2f8d04b47600766e780eb9c1b5b5a68d/plot_pextend_python_runtime.py +++ b/_downloads/2f8d04b47600766e780eb9c1b5b5a68d/plot_pextend_python_runtime.py @@ -26,16 +26,13 @@ decorrelates the variables by computing the eigen values. Method *fit* does not do anything anymore. -.. contents:: - :local: - A transformer which decorrelates variables ++++++++++++++++++++++++++++++++++++++++++ This time, the eigen values are not estimated at training time but at prediction time. """ -from mlprodict.onnxrt.shape_object import ShapeObject + from mlprodict.onnxrt.ops_cpu import OpRunCustom, register_operator from skl2onnx.algebra.onnx_ops import ( OnnxAdd, @@ -46,7 +43,7 @@ OnnxMatMul, OnnxMul, OnnxPow, - OnnxReduceMean, + OnnxReduceMean_13, OnnxShape, OnnxSub, OnnxTranspose, @@ -234,7 +231,7 @@ def live_decorrelate_transformer_converter(scope, operator, container): # Lines in comment specify the numpy computation # the ONNX code implements. # mean_ = numpy.mean(X, axis=0, keepdims=True) - mean = OnnxReduceMean(X, axes=[0], keepdims=1, op_version=opv) + mean = OnnxReduceMean_13(X, axes=[0], keepdims=1, op_version=opv) # This is trick I often use. The converter automatically # chooses a name for every output. In big graph, @@ -337,19 +334,6 @@ def run(self, x, **kwargs): return numpy.linalg.eig(x) return (numpy.linalg.eigvals(x), ) - def infer_shapes(self, x): - # shape inference, if you don't know what to - # write, just return `ShapeObject(None)` - if self.eigv: - return ( - ShapeObject( - x.shape, dtype=x.dtype, - name=self.__class__.__name__ + 'Values'), - ShapeObject( - x.shape, dtype=x.dtype, - name=self.__class__.__name__ + 'Vectors')) - return (ShapeObject(x.shape, dtype=x.dtype, - name=self.__class__.__name__), ) ######################################## # Registration @@ -372,7 +356,7 @@ def infer_shapes(self, x): dec = LiveDecorrelateTransformer() dec.fit(X) -onx = to_onnx(dec, X.astype(numpy.float32)) +onx = to_onnx(dec, X.astype(numpy.float32), target_opset=17) register_operator(OpEig, name='Eig', overwrite=False) diff --git a/_downloads/2fac78b9b54759278e978ea04abebcab/plot_gexternal_xgboost.py b/_downloads/2fac78b9b54759278e978ea04abebcab/plot_gexternal_xgboost.py index 1815e2942..fa5814938 100644 --- a/_downloads/2fac78b9b54759278e978ea04abebcab/plot_gexternal_xgboost.py +++ b/_downloads/2fac78b9b54759278e978ea04abebcab/plot_gexternal_xgboost.py @@ -17,9 +17,6 @@ it knows the converter associated to a *XGBClassifier*. Let's see how to do it. -.. 
contents:: - :local: - Train a XGBoost classifier ++++++++++++++++++++++++++ """ diff --git a/_downloads/3198a363670952cab4b00c49b36e6422/plot_gpr.py b/_downloads/3198a363670952cab4b00c49b36e6422/plot_gpr.py index 34b779894..402fbe1b6 100644 --- a/_downloads/3198a363670952cab4b00c49b36e6422/plot_gpr.py +++ b/_downloads/3198a363670952cab4b00c49b36e6422/plot_gpr.py @@ -15,9 +15,6 @@ but for this particular model, it is better to use double. Let's see how to create an ONNX file using doubles. -.. contents:: - :local: - Train a model +++++++++++++ @@ -27,7 +24,7 @@ import pprint import numpy import sklearn -from sklearn.datasets import load_boston +from sklearn.datasets import load_diabetes from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import DotProduct, RBF from sklearn.model_selection import train_test_split @@ -37,8 +34,8 @@ from skl2onnx.common.data_types import FloatTensorType, DoubleTensorType from skl2onnx import convert_sklearn -bost = load_boston() -X, y = bost.data, bost.target +dataset = load_diabetes() +X, y = dataset.data, dataset.target X_train, X_test, y_train, y_test = train_test_split(X, y) gpr = GaussianProcessRegressor(DotProduct() + RBF(), alpha=1.) gpr.fit(X_train, y_train) diff --git a/_downloads/328d7a85bc1ab3272fc9b9b6b317d558/plot_icustom_converter.py b/_downloads/328d7a85bc1ab3272fc9b9b6b317d558/plot_icustom_converter.py index ad57b987e..57c3f7304 100644 --- a/_downloads/328d7a85bc1ab3272fc9b9b6b317d558/plot_icustom_converter.py +++ b/_downloads/328d7a85bc1ab3272fc9b9b6b317d558/plot_icustom_converter.py @@ -22,9 +22,6 @@ This example implements both components for a new model. -.. contents:: - :local: - Custom model ++++++++++++ diff --git a/_downloads/3ea498c48f9870a80045a1cb2e840bbc/plot_ebegin_float_double.py b/_downloads/3ea498c48f9870a80045a1cb2e840bbc/plot_ebegin_float_double.py index 9014b2cd7..78b354df8 100644 --- a/_downloads/3ea498c48f9870a80045a1cb2e840bbc/plot_ebegin_float_double.py +++ b/_downloads/3ea498c48f9870a80045a1cb2e840bbc/plot_ebegin_float_double.py @@ -30,9 +30,6 @@ an example which always produces discrepencies and some ways to overcome this situation. -.. contents:: - :local: - More into the issue +++++++++++++++++++ @@ -53,10 +50,8 @@ the discord areas. """ from mlprodict.sklapi import OnnxPipeline -from skl2onnx.sklapi import CastTransformer, CastRegressor +from skl2onnx.sklapi import CastTransformer from skl2onnx import to_onnx -from mlprodict.onnx_conv import to_onnx as to_onnx_extended -from mlprodict.onnxrt import OnnxInference from onnxruntime import InferenceSession from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeRegressor @@ -149,7 +144,8 @@ def diff(p1, p2): return d.max(), (d / numpy.abs(p1)).max() -onx = to_onnx(model, Xi_train[:1].astype(numpy.float32)) +onx = to_onnx(model, Xi_train[:1].astype(numpy.float32), + target_opset=15) sess = InferenceSession(onx.SerializeToString()) @@ -207,7 +203,8 @@ def diff(p1, p2): ########################################## # The discrepencies. 
-onx2 = to_onnx(model2, Xi_train[:1].astype(numpy.float32)) +onx2 = to_onnx(model2, Xi_train[:1].astype(numpy.float32), + target_opset=15) sess2 = InferenceSession(onx2.SerializeToString()) @@ -232,7 +229,8 @@ def diff(p1, p2): model3.fit(Xi_train, yi_train) onx3 = to_onnx(model3, Xi_train[:1].astype(numpy.float32), - options={StandardScaler: {'div': 'div_cast'}}) + options={StandardScaler: {'div': 'div_cast'}}, + target_opset=15) sess3 = InferenceSession(onx3.SerializeToString()) @@ -275,14 +273,12 @@ def diff(p1, p2): model_onx.fit(Xi_train, yi_train) ############################################# -# The conversion. +# By using opset 17 and opset 3 for domain ai.onnx.ml, the tree thresholds +# can be stored as double and not float anymore. That lowerss the discrepancies +# even if the outputs are still float. -try: - onx4 = to_onnx(model_onx, Xi_train[:1].astype(numpy.float32)) -except ValueError as e: - print("Failing due to %r.\nYou need to update mlprodict." % e) - import sys - sys.exit(0) +onx4 = to_onnx(model_onx, Xi_train[:1].astype(numpy.float32), + target_opset=17) sess4 = InferenceSession(onx4.SerializeToString()) @@ -290,76 +286,3 @@ def diff(p1, p2): ort4 = sess4.run(None, {'X': X32})[0] print(diff(skl4, ort4)) - -################################# -# It works too in a more simple way. - -######################################## -# No discrepencies at all? -# ++++++++++++++++++++++++ -# -# Is it possible to get no error at all? -# There is one major obstacle: :epkg:`scikit-learn` -# stores the predicted values in every leave with double -# (`_tree.pyx - _get_value_ndarray -# `_), :epkg:`ONNX` defines the -# the predicted values as floats: :epkg:`TreeEnsembleRegressor`. -# What can we do to solve it? -# What if we could extend ONNX specifications to support -# double instead of floats. -# We reuse what was developped in example -# `Other way to convert `_ -# and a custom ONNX node `TreeEnsembleRegressorDouble -# `_. - - -tree = DecisionTreeRegressor(max_depth=max_depth) -tree.fit(Xi_train, yi_train) - -model_onx = to_onnx_extended(tree, Xi_train[:1].astype(numpy.float64), - rewrite_ops=True) - -oinf5 = OnnxInference(model_onx, runtime='python_compiled') -print(oinf5) - -########################################## -# Let's measure the discrepencies. - -X64 = Xi_test.astype(numpy.float64) -skl5 = tree.predict(X64) -ort5 = oinf5.run({'X': X64})['variable'] - -############################################ -# Perfect, no discrepencies at all. - -print(diff(skl5, ort5)) - -############################################## -# CastRegressor -# +++++++++++++ -# -# The previous example demonstrated the type difference for -# the predicted values explains the small differences between -# :epkg:`scikit-learn` and :epkg:`onnxruntime`. But it does not -# with the current ONNX. Another option is to cast the -# the predictions into floats in the :epkg:`scikit-learn` pipeline. - - -ctree = CastRegressor(DecisionTreeRegressor(max_depth=max_depth)) -ctree.fit(Xi_train, yi_train) - -onx6 = to_onnx(ctree, Xi_train[:1].astype(numpy.float32)) - -sess6 = InferenceSession(onx6.SerializeToString()) - -skl6 = ctree.predict(X32) -ort6 = sess6.run(None, {'X': X32})[0] - -print(diff(skl6, ort6)) - -############################## -# Success! 
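# --- Editor's aside (not part of the diff above): a minimal, standalone sketch
# of the rounding effect plot_ebegin_float_double.py is about. The threshold
# value below is hypothetical; the point is that a sample sitting just below a
# double-precision decision-tree threshold can land on the other side of it
# once both numbers are cast to float32, which is where the remaining
# discrepancies between scikit-learn and a float32 ONNX graph come from.
import numpy

threshold = 0.6996063841134071   # hypothetical split threshold stored as double
x = threshold - 1e-12            # feature value just below the threshold
print(x < threshold)             # True in double precision
print(numpy.float32(x) < numpy.float32(threshold))  # False: both round to the same float32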
diff --git a/_downloads/41c8f80302a773ec3fe99af5d9cbf0f9/plot_transformer_discrepancy.ipynb b/_downloads/41c8f80302a773ec3fe99af5d9cbf0f9/plot_transformer_discrepancy.ipynb new file mode 100644 index 000000000..915695dee --- /dev/null +++ b/_downloads/41c8f80302a773ec3fe99af5d9cbf0f9/plot_transformer_discrepancy.ipynb @@ -0,0 +1,144 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Dealing with discrepancies (tf-idf)\n\n.. index:: td-idf\n\n[TfidfVectorizer](https://scikit-learn.org/stable/modules/\ngenerated/sklearn.feature_extraction.text.TfidfVectorizer.html)\nis one transform for which the corresponding converted onnx model\nmay produce different results. The larger the vocabulary is,\nthe higher the probability to get different result is.\nThis example proposes a equivalent model with no discrepancies.\n\n## Imports, setups\n\nAll imports. It also registered onnx converters for :epgk:`xgboost`\nand :epkg:`lightgbm`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import pprint\nimport numpy\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom onnxruntime import InferenceSession\nfrom skl2onnx import to_onnx\n\n\ndef print_sparse_matrix(m):\n nonan = numpy.nan_to_num(m)\n mi, ma = nonan.min(), nonan.max()\n if mi == ma:\n ma += 1\n mat = numpy.empty(m.shape, dtype=numpy.str_)\n mat[:, :] = '.'\n if hasattr(m, 'todense'):\n dense = m.todense()\n else:\n dense = m\n for i in range(m.shape[0]):\n for j in range(m.shape[1]):\n if dense[i, j] > 0:\n c = int((dense[i, j] - mi) / (ma - mi) * 25)\n mat[i, j] = chr(ord('A') + c)\n return '\\n'.join(''.join(line) for line in mat)\n\n\ndef diff(a, b):\n if a.shape != b.shape:\n raise ValueError(\n f\"Cannot compare matrices with different shapes \"\n f\"{a.shape} != {b.shape}.\")\n d = numpy.abs(a - b).sum() / a.size\n return d" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Artificial datasets\n\nIris + a text column.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "strings = numpy.array([\n \"This a sentence.\",\n \"This a sentence with more characters $^*&'(-...\",\n \"\"\"var = ClassName(var2, user=mail@anywhere.com, pwd\"\"\"\n \"\"\"=\")_~-('&]@^\\\\`|[{#\")\"\"\",\n \"c79857654\",\n \"https://complex-url.com/;76543u3456?g=hhh&h=23\",\n \"01-03-05T11:12:13\",\n \"https://complex-url.com/;dd76543u3456?g=ddhhh&h=23\",\n]).reshape((-1, 1))\n\npprint.pprint(strings)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Fit a TfIdfVectorizer\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "tfidf = Pipeline([\n ('pre', ColumnTransformer([\n ('tfidf', TfidfVectorizer(), 0)\n ]))\n])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We leave a couple of strings out of the training set.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "tfidf.fit(strings[:-2])\ntr = tfidf.transform(strings)\ntfidf_step = 
tfidf.steps[0][1].transformers_[0][1]\n# print(f\"output columns: {tfidf_step.get_feature_names_out()}\")\nprint(\"rendered outputs\")\nprint(print_sparse_matrix(tr))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion to ONNX\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(tfidf, strings)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Execution with ONNX\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = InferenceSession(onx.SerializeToString())\ngot = sess.run(None, {'X': strings})[0]\nprint(f\"differences={diff(tr, got):g}\")\nprint(print_sparse_matrix(got))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/_downloads/4607926c49362b9ebf21a3348a991d4e/plot_pipeline_xgboost.py b/_downloads/4607926c49362b9ebf21a3348a991d4e/plot_pipeline_xgboost.py index a2b752bcd..5de606c31 100644 --- a/_downloads/4607926c49362b9ebf21a3348a991d4e/plot_pipeline_xgboost.py +++ b/_downloads/4607926c49362b9ebf21a3348a991d4e/plot_pipeline_xgboost.py @@ -16,9 +16,6 @@ the whole pipeline as long as it knows the converter associated to a *XGBClassifier*. Let's see how to do it. -.. contents:: - :local: - Train a XGBoost classifier ++++++++++++++++++++++++++ """ diff --git a/_downloads/477e892a1d5bdddff86e8c46e8f7d83b/plot_onnx_operators.py b/_downloads/477e892a1d5bdddff86e8c46e8f7d83b/plot_onnx_operators.py index 831525380..234384fbf 100644 --- a/_downloads/477e892a1d5bdddff86e8c46e8f7d83b/plot_onnx_operators.py +++ b/_downloads/477e892a1d5bdddff86e8c46e8f7d83b/plot_onnx_operators.py @@ -24,10 +24,6 @@ But it is quite verbose and makes it difficult to describe big graphs. *sklearn-onnx* implements a nicer way to test *ONNX* operators. - -.. contents:: - :local: - ONNX Python API +++++++++++++++ diff --git a/_downloads/4fb830ec47c83319dff3227932da0ad5/plot_dbegin_options_list.ipynb b/_downloads/4fb830ec47c83319dff3227932da0ad5/plot_dbegin_options_list.ipynb index c186a6b1b..9bf36e33f 100644 --- a/_downloads/4fb830ec47c83319dff3227932da0ad5/plot_dbegin_options_list.ipynb +++ b/_downloads/4fb830ec47c83319dff3227932da0ad5/plot_dbegin_options_list.ipynb @@ -1,169 +1,169 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Black list operators when converting\n\n.. index:: black list, white list\n\nSome runtimes do not implement a runtime for every\navailable operator in ONNX. The converter does not know\nthat but it is possible to black some operators. 
Most of\nthe converters do not change their behaviour, they fail\nif they use a black listed operator, a couple of them\nproduces a different ONNX graph.\n\n## GaussianMixture\n\nThe first converter to change its behaviour depending on a black list\nof operators is for model *GaussianMixture*.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom mlprodict.onnxrt import OnnxInference\nfrom timeit import timeit\nimport numpy\nfrom onnxruntime import InferenceSession\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom skl2onnx import to_onnx\n\ndata = load_iris()\nX_train, X_test = train_test_split(data.data)\nmodel = GaussianMixture()\nmodel.fit(X_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Default conversion\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = to_onnx(\n model, X_train[:1].astype(numpy.float32),\n options={id(model): {'score_samples': True}},\n target_opset=12)\nsess = InferenceSession(model_onnx.SerializeToString())\n\nxt = X_test[:5].astype(numpy.float32)\nprint(model.score_samples(xt))\nprint(sess.run(None, {'X': xt})[2])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Display the ONNX graph.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf = OnnxInference(model_onnx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion without ReduceLogSumExp\n\nParameter *black_op* is used to tell the converter\nnot to use this operator. 
Let's see what the converter\nproduces in that case.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx2 = to_onnx(\n model, X_train[:1].astype(numpy.float32),\n options={id(model): {'score_samples': True}},\n black_op={'ReduceLogSumExp'},\n target_opset=12)\nsess2 = InferenceSession(model_onnx2.SerializeToString())\n\nxt = X_test[:5].astype(numpy.float32)\nprint(model.score_samples(xt))\nprint(sess2.run(None, {'X': xt})[2])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Display the ONNX graph.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf = OnnxInference(model_onnx2)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Processing time\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(timeit(stmt=\"sess.run(None, {'X': xt})\",\n number=10000, globals={'sess': sess, 'xt': xt}))\n\nprint(timeit(stmt=\"sess2.run(None, {'X': xt})\",\n number=10000, globals={'sess2': sess2, 'xt': xt}))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The model using ReduceLogSumExp is much faster.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## If the converter cannot convert without...\n\nMany converters do not consider the white and black lists\nof operators. If a converter fails to convert without using\na blacklisted operator (or only whitelisted operators),\n*skl2onnx* raises an error.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n to_onnx(\n model, X_train[:1].astype(numpy.float32),\n options={id(model): {'score_samples': True}},\n black_op={'ReduceLogSumExp', 'Add'},\n target_opset=12)\nexcept RuntimeError as e:\n print('Error:', e)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Black list operators when converting\n\n.. index:: black list, white list\n\nSome runtimes do not implement a runtime for every\navailable operator in ONNX. The converter does not know\nthat but it is possible to black some operators. 
Most of\nthe converters do not change their behaviour, they fail\nif they use a black listed operator, a couple of them\nproduces a different ONNX graph.\n\n## GaussianMixture\n\nThe first converter to change its behaviour depending on a black list\nof operators is for model *GaussianMixture*.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom mlprodict.onnxrt import OnnxInference\nfrom timeit import timeit\nimport numpy\nfrom onnxruntime import InferenceSession\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom skl2onnx import to_onnx\n\ndata = load_iris()\nX_train, X_test = train_test_split(data.data)\nmodel = GaussianMixture()\nmodel.fit(X_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Default conversion\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = to_onnx(\n model, X_train[:1].astype(numpy.float32),\n options={id(model): {'score_samples': True}},\n target_opset=12)\nsess = InferenceSession(model_onnx.SerializeToString())\n\nxt = X_test[:5].astype(numpy.float32)\nprint(model.score_samples(xt))\nprint(sess.run(None, {'X': xt})[2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Display the ONNX graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(model_onnx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion without ReduceLogSumExp\n\nParameter *black_op* is used to tell the converter\nnot to use this operator. 
Let's see what the converter\nproduces in that case.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx2 = to_onnx(\n model, X_train[:1].astype(numpy.float32),\n options={id(model): {'score_samples': True}},\n black_op={'ReduceLogSumExp'},\n target_opset=12)\nsess2 = InferenceSession(model_onnx2.SerializeToString())\n\nxt = X_test[:5].astype(numpy.float32)\nprint(model.score_samples(xt))\nprint(sess2.run(None, {'X': xt})[2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Display the ONNX graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(model_onnx2)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Processing time\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(timeit(stmt=\"sess.run(None, {'X': xt})\",\n number=10000, globals={'sess': sess, 'xt': xt}))\n\nprint(timeit(stmt=\"sess2.run(None, {'X': xt})\",\n number=10000, globals={'sess2': sess2, 'xt': xt}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The model using ReduceLogSumExp is much faster.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## If the converter cannot convert without...\n\nMany converters do not consider the white and black lists\nof operators. If a converter fails to convert without using\na blacklisted operator (or only whitelisted operators),\n*skl2onnx* raises an error.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n to_onnx(\n model, X_train[:1].astype(numpy.float32),\n options={id(model): {'score_samples': True}},\n black_op={'ReduceLogSumExp', 'Add'},\n target_opset=12)\nexcept RuntimeError as e:\n print('Error:', e)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/4fc9dce03eee96a33ff7deea93fad833/plot_gbegin_cst.py b/_downloads/4fc9dce03eee96a33ff7deea93fad833/plot_gbegin_cst.py index a8ab962ad..b2a9e33ba 100644 --- a/_downloads/4fc9dce03eee96a33ff7deea93fad833/plot_gbegin_cst.py +++ b/_downloads/4fc9dce03eee96a33ff7deea93fad833/plot_gbegin_cst.py @@ -11,9 +11,6 @@ Last sections shows how to remove an output or to promote an intermediate result to an output. -.. 
contents:: - :local: - Train and convert a model +++++++++++++++++++++++++ diff --git a/_downloads/50887ce6544a37717aedd37e9c6a03e3/plot_abegin_convert_pipeline.ipynb b/_downloads/50887ce6544a37717aedd37e9c6a03e3/plot_abegin_convert_pipeline.ipynb index 882b621f2..3fb092f53 100644 --- a/_downloads/50887ce6544a37717aedd37e9c6a03e3/plot_abegin_convert_pipeline.ipynb +++ b/_downloads/50887ce6544a37717aedd37e9c6a03e3/plot_abegin_convert_pipeline.ipynb @@ -1,169 +1,169 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Train and deploy a scikit-learn pipeline\n\n.. index:: pipeline, deployment\n\nThis program starts from an example in :epkg:`scikit-learn`\ndocumentation: `Plot individual and voting regression predictions\n`_,\nconverts it into ONNX and finally computes the predictions\na different runtime.\n\n\n## Training a pipeline\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nimport numpy\nfrom onnxruntime import InferenceSession\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.ensemble import (\n GradientBoostingRegressor, RandomForestRegressor,\n VotingRegressor)\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom skl2onnx import to_onnx\nfrom mlprodict.onnxrt import OnnxInference\n\n\nX, y = load_diabetes(return_X_y=True)\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\n# Train classifiers\nreg1 = GradientBoostingRegressor(random_state=1, n_estimators=5)\nreg2 = RandomForestRegressor(random_state=1, n_estimators=5)\nreg3 = LinearRegression()\n\nereg = Pipeline(steps=[\n ('voting', VotingRegressor([('gb', reg1), ('rf', reg2), ('lr', reg3)])),\n])\nereg.fit(X_train, y_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Converts the model\n\nThe second argument gives a sample of the data\nused to train the model. It is used to infer\nthe input type of the ONNX graph. 
It is converted\ninto single float and ONNX runtimes may not fully\nsupport doubles.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx = to_onnx(ereg, X_train[:1].astype(numpy.float32),\n target_opset=12)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Prediction with ONNX\n\nThe first example uses :epkg:`onnxruntime`.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = InferenceSession(onx.SerializeToString())\npred_ort = sess.run(None, {'X': X_test.astype(numpy.float32)})[0]\n\npred_skl = ereg.predict(X_test.astype(numpy.float32))\n\nprint(\"Onnx Runtime prediction:\\n\", pred_ort[:5])\nprint(\"Sklearn rediction:\\n\", pred_skl[:5])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n## Comparison\n\nBefore deploying, we need to compare that both\n*scikit-learn* and *ONNX* return the same predictions.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(pred_skl, pred_ort))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It looks good. Biggest errors (absolute and relative)\nare within the margin error introduced by using\nfloats instead of doubles.\nWe can save the model into ONNX\nformat and compute the same predictions in many\nplatform using :epkg:`onnxruntime`.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Python runtime\n\nA python runtime can be used as well to compute\nthe prediction. 
It is not meant to be used into\nproduction (it still relies on python), but it is\nuseful to investigate why the conversion went wrong.\nIt uses module :epkg:`mlprodict`.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf = OnnxInference(onx, runtime=\"python_compiled\")\nprint(oinf)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It works almost the same way.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pred_pyrt = oinf.run({'X': X_test.astype(numpy.float32)})['variable']\nprint(diff(pred_skl, pred_pyrt))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Final graph\nYou may need to install graphviz from https://graphviz.org/download/\n+++++++++++\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Train and deploy a scikit-learn pipeline\n\n.. index:: pipeline, deployment\n\nThis program starts from an example in :epkg:`scikit-learn`\ndocumentation: [Plot individual and voting regression predictions](https://scikit-learn.org/stable/auto_examples/ensemble/plot_voting_regressor.html),\nconverts it into ONNX and finally computes the predictions\na different runtime.\n\n## Training a pipeline\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nimport numpy\nfrom onnxruntime import InferenceSession\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.ensemble import (\n GradientBoostingRegressor, RandomForestRegressor,\n VotingRegressor)\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom skl2onnx import to_onnx\nfrom mlprodict.onnxrt import OnnxInference\n\n\nX, y = load_diabetes(return_X_y=True)\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\n# Train classifiers\nreg1 = GradientBoostingRegressor(random_state=1, n_estimators=5)\nreg2 = RandomForestRegressor(random_state=1, n_estimators=5)\nreg3 = LinearRegression()\n\nereg = Pipeline(steps=[\n ('voting', VotingRegressor([('gb', reg1), ('rf', reg2), ('lr', reg3)])),\n])\nereg.fit(X_train, y_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Converts the model\n\nThe second argument gives a sample of the data\nused to train the model. It is used to infer\nthe input type of the ONNX graph. 
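(For reference, a minimal sketch of the explicit alternative, assuming `to_onnx` also accepts the *initial_types* argument used elsewhere in this documentation; it reuses `ereg` and `X_train` defined above.)

# Hedged sketch: declare the input type manually instead of passing a sample.
from skl2onnx.common.data_types import FloatTensorType

onx_explicit = to_onnx(
    ereg, initial_types=[('X', FloatTensorType([None, X_train.shape[1]]))],
    target_opset=12)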
The sample is converted\ninto single floats because ONNX runtimes may not fully\nsupport doubles.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(ereg, X_train[:1].astype(numpy.float32),\n target_opset=12)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prediction with ONNX\n\nThe first example uses :epkg:`onnxruntime`.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = InferenceSession(onx.SerializeToString())\npred_ort = sess.run(None, {'X': X_test.astype(numpy.float32)})[0]\n\npred_skl = ereg.predict(X_test.astype(numpy.float32))\n\nprint(\"Onnx Runtime prediction:\\n\", pred_ort[:5])\nprint(\"Sklearn prediction:\\n\", pred_skl[:5])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n## Comparison\n\nBefore deploying, we need to check that both\n*scikit-learn* and *ONNX* return the same predictions.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(pred_skl, pred_ort))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It looks good. The biggest errors (absolute and relative)\nare within the error margin introduced by using\nfloats instead of doubles.\nWe can save the model into ONNX\nformat and compute the same predictions on many\nplatforms using :epkg:`onnxruntime`.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Python runtime\n\nA python runtime can be used as well to compute\nthe prediction. 
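(A rough way to compare the cost of both runtimes, not part of the original example; the sketch reuses `onx`, `sess` and `X_test` defined above and assumes the same `OnnxInference` API as the cell below.)

# Hedged sketch: time onnxruntime against the pure python runtime.
from timeit import timeit

xt = X_test.astype(numpy.float32)
oinf_py = OnnxInference(onx, runtime="python_compiled")
print("onnxruntime:", timeit(lambda: sess.run(None, {'X': xt}), number=100))
print("python runtime:", timeit(lambda: oinf_py.run({'X': xt}), number=100))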
It is not meant to be used in\nproduction (it still relies on python), but it is\nuseful to investigate why the conversion went wrong.\nIt uses module :epkg:`mlprodict`.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(onx, runtime=\"python_compiled\")\nprint(oinf)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It works almost the same way.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pred_pyrt = oinf.run({'X': X_test.astype(numpy.float32)})['variable']\nprint(diff(pred_skl, pred_pyrt))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final graph\n\nYou may need to install graphviz from https://graphviz.org/download/\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/53aee3ad507bd90f1282ad1da55bb893/plot_tfidfvectorizer.py b/_downloads/53aee3ad507bd90f1282ad1da55bb893/plot_tfidfvectorizer.py index ddb208074..b33b6765b 100644 --- a/_downloads/53aee3ad507bd90f1282ad1da55bb893/plot_tfidfvectorizer.py +++ b/_downloads/53aee3ad507bd90f1282ad1da55bb893/plot_tfidfvectorizer.py @@ -13,9 +13,6 @@ compose/plot_column_transformer.html>`_ which builds a pipeline to classify text. -.. contents:: - :local: - Train a pipeline with TfidfVectorizer +++++++++++++++++++++++++++++++++++++ @@ -107,7 +104,7 @@ def transform(self, posts): pipeline = Pipeline([ ('union', ColumnTransformer( [ - ('subject', TfidfVectorizer(min_df=50), 0), + ('subject', TfidfVectorizer(min_df=50, max_features=500), 0), ('body_bow', Pipeline([ ('tfidf', TfidfVectorizer()), diff --git a/_downloads/555f58ca2e62b784278c9b441747d2e4/plot_gbegin_dataframe.py b/_downloads/555f58ca2e62b784278c9b441747d2e4/plot_gbegin_dataframe.py index 54cf040ef..33ecc6b59 100644 --- a/_downloads/555f58ca2e62b784278c9b441747d2e4/plot_gbegin_dataframe.py +++ b/_downloads/555f58ca2e62b784278c9b441747d2e4/plot_gbegin_dataframe.py @@ -11,9 +11,6 @@ have usually multiple types, float, integer or string for categories. ONNX also supports that case. -.. contents:: - :local: - A dataset with categories +++++++++++++++++++++++++ diff --git a/_downloads/559bfe2a2fd8f0adfcc4c407bdf43711/plot_logging.py b/_downloads/559bfe2a2fd8f0adfcc4c407bdf43711/plot_logging.py index 77ec5ce99..fcaad1000 100644 --- a/_downloads/559bfe2a2fd8f0adfcc4c407bdf43711/plot_logging.py +++ b/_downloads/559bfe2a2fd8f0adfcc4c407bdf43711/plot_logging.py @@ -12,10 +12,6 @@ by a custom converter. If the error message is not explicit enough, it is possible to enable logging. - -..
contents:: - :local: - Train a model +++++++++++++ diff --git a/_downloads/56934e384f36d0698bf962db791d078d/plot_gexternal_lightgbm.ipynb b/_downloads/56934e384f36d0698bf962db791d078d/plot_gexternal_lightgbm.ipynb index 769d54a7c..c4f395a7e 100644 --- a/_downloads/56934e384f36d0698bf962db791d078d/plot_gexternal_lightgbm.ipynb +++ b/_downloads/56934e384f36d0698bf962db791d078d/plot_gexternal_lightgbm.ipynb @@ -1,144 +1,144 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Convert a pipeline with a LightGBM classifier\n\n.. index:: LightGBM\n\n:epkg:`sklearn-onnx` only converts :epkg:`scikit-learn` models into *ONNX*\nbut many libraries implement :epkg:`scikit-learn` API so that their models\ncan be included in a :epkg:`scikit-learn` pipeline. This example considers\na pipeline including a :epkg:`LightGBM` model. :epkg:`sklearn-onnx` can convert\nthe whole pipeline as long as it knows the converter associated to\na *LGBMClassifier*. Let's see how to do it.\n\n## Train a LightGBM classifier\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom mlprodict.onnxrt import OnnxInference\nimport onnxruntime as rt\nfrom skl2onnx import convert_sklearn, update_registered_converter\nfrom skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes # noqa\nfrom onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm # noqa\nfrom skl2onnx.common.data_types import FloatTensorType\nimport numpy\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom lightgbm import LGBMClassifier\n\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\n\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()\n\npipe = Pipeline([('scaler', StandardScaler()),\n ('lgbm', LGBMClassifier(n_estimators=3))])\npipe.fit(X, y)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Register the converter for LGBMClassifier\n\nThe converter is implemented in :epkg:`onnxmltools`:\n`onnxmltools...LightGbm.py\n`_.\nand the shape calculator:\n`onnxmltools...Classifier.py\n`_.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(\n LGBMClassifier, 'LightGbmLGBMClassifier',\n calculate_linear_classifier_output_shapes, convert_lightgbm,\n options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Convert again\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = convert_sklearn(\n pipe, 'pipeline_lightgbm',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\n\n# And save.\nwith open(\"pipeline_lightgbm.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Compare the predictions\n\nPredictions with LightGbm.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": 
null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"predict\", pipe.predict(X[:5]))\nprint(\"predict_proba\", pipe.predict_proba(X[:1]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Predictions with onnxruntime.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = rt.InferenceSession(\"pipeline_lightgbm.onnx\")\n\npred_onx = sess.run(None, {\"input\": X[:5].astype(numpy.float32)})\nprint(\"predict\", pred_onx[0])\nprint(\"predict_proba\", pred_onx[1][:1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Final graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf = OnnxInference(model_onnx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Convert a pipeline with a LightGBM classifier\n\n.. index:: LightGBM\n\n:epkg:`sklearn-onnx` only converts :epkg:`scikit-learn` models into *ONNX*\nbut many libraries implement :epkg:`scikit-learn` API so that their models\ncan be included in a :epkg:`scikit-learn` pipeline. This example considers\na pipeline including a :epkg:`LightGBM` model. :epkg:`sklearn-onnx` can convert\nthe whole pipeline as long as it knows the converter associated to\na *LGBMClassifier*. 
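(As an aside, not part of the original example: a standalone *LGBMClassifier*, outside any pipeline, can also be converted directly with onnxmltools. A minimal sketch, assuming the top-level `convert_lightgbm` helper wraps the same converter registered below.)

# Hedged sketch: direct conversion of a standalone LightGBM classifier.
import numpy
from lightgbm import LGBMClassifier
from onnxmltools import convert_lightgbm
from skl2onnx.common.data_types import FloatTensorType

X = numpy.random.rand(20, 2).astype(numpy.float32)
y = numpy.array([0, 1] * 10)
clf = LGBMClassifier(n_estimators=3).fit(X, y)
onx_lgbm = convert_lightgbm(
    clf, initial_types=[('input', FloatTensorType([None, 2]))])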
Let's see how to do it.\n\n## Train a LightGBM classifier\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom mlprodict.onnxrt import OnnxInference\nimport onnxruntime as rt\nfrom skl2onnx import convert_sklearn, update_registered_converter\nfrom skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes # noqa\nfrom onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm # noqa\nfrom skl2onnx.common.data_types import FloatTensorType\nimport numpy\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom lightgbm import LGBMClassifier\n\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\n\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()\n\npipe = Pipeline([('scaler', StandardScaler()),\n ('lgbm', LGBMClassifier(n_estimators=3))])\npipe.fit(X, y)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Register the converter for LGBMClassifier\n\nThe converter is implemented in :epkg:`onnxmltools`:\n[onnxmltools...LightGbm.py](https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/\nlightgbm/operator_converters/LightGbm.py).\nand the shape calculator:\n[onnxmltools...Classifier.py](https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/\nlightgbm/shape_calculators/Classifier.py).\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(\n LGBMClassifier, 'LightGbmLGBMClassifier',\n calculate_linear_classifier_output_shapes, convert_lightgbm,\n options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert again\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = convert_sklearn(\n pipe, 'pipeline_lightgbm',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\n\n# And save.\nwith open(\"pipeline_lightgbm.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Compare the predictions\n\nPredictions with LightGbm.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"predict\", pipe.predict(X[:5]))\nprint(\"predict_proba\", pipe.predict_proba(X[:1]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Predictions with onnxruntime.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = rt.InferenceSession(\"pipeline_lightgbm.onnx\")\n\npred_onx = sess.run(None, {\"input\": X[:5].astype(numpy.float32)})\nprint(\"predict\", pred_onx[0])\nprint(\"predict_proba\", pred_onx[1][:1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(model_onnx)\nax = 
plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/571d4f875003ddfeb88e24bb174c1d02/plot_convert_syntax.ipynb b/_downloads/571d4f875003ddfeb88e24bb174c1d02/plot_convert_syntax.ipynb index d4227b74a..be05f82fe 100644 --- a/_downloads/571d4f875003ddfeb88e24bb174c1d02/plot_convert_syntax.ipynb +++ b/_downloads/571d4f875003ddfeb88e24bb174c1d02/plot_convert_syntax.ipynb @@ -1,252 +1,252 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Different ways to convert a model\n\nThis example leverages some code added to implement custom converters\nin an easy way.\n\n## Predict with onnxruntime\n\nSimple function to check the converted model\nworks fine.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import onnxruntime\nimport onnx\nimport numpy\nimport numpy as np\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.cluster import KMeans\nfrom sklearn.pipeline import make_pipeline\nfrom onnxruntime import InferenceSession\nfrom skl2onnx import convert_sklearn, to_onnx, wrap_as_onnx_mixin\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx.algebra.onnx_ops import OnnxSub, OnnxDiv\nfrom skl2onnx.algebra.onnx_operator_mixin import OnnxOperatorMixin\n\n\ndef predict_with_onnxruntime(onx, X):\n sess = InferenceSession(onx.SerializeToString())\n input_name = sess.get_inputs()[0].name\n res = sess.run(None, {input_name: X.astype(np.float32)})\n return res[0]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Simple KMeans\n\nThe first way: :func:`convert_sklearn`.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X = np.arange(20).reshape(10, 2)\ntr = KMeans(n_clusters=2)\ntr.fit(X)\n\nonx = convert_sklearn(\n tr, initial_types=[('X', FloatTensorType((None, X.shape[1])))],\n target_opset=12)\nprint(predict_with_onnxruntime(onx, X))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The second way: :func:`to_onnx`: no need to play with\n:class:`FloatTensorType` anymore.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X = np.arange(20).reshape(10, 2)\ntr = KMeans(n_clusters=2)\ntr.fit(X)\n\nonx = to_onnx(tr, X.astype(np.float32), target_opset=12)\nprint(predict_with_onnxruntime(onx, X))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The third way: :func:`wrap_as_onnx_mixin`: wraps\nthe machine learned model into a new class\ninheriting from :class:`OnnxOperatorMixin`.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - 
"source": [ - "X = np.arange(20).reshape(10, 2)\ntr = KMeans(n_clusters=2)\ntr.fit(X)\n\ntr_mixin = wrap_as_onnx_mixin(tr, target_opset=12)\n\nonx = tr_mixin.to_onnx(X.astype(np.float32))\nprint(predict_with_onnxruntime(onx, X))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The fourth way: :func:`wrap_as_onnx_mixin`: can be called\nbefore fitting the model.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X = np.arange(20).reshape(10, 2)\ntr = wrap_as_onnx_mixin(KMeans(n_clusters=2),\n target_opset=12)\ntr.fit(X)\n\nonx = tr.to_onnx(X.astype(np.float32))\nprint(predict_with_onnxruntime(onx, X))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Pipeline and a custom object\n\nThis is a simple scaler.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "class CustomOpTransformer(BaseEstimator, TransformerMixin,\n OnnxOperatorMixin):\n\n def __init__(self):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.op_version = 12\n\n def fit(self, X, y=None):\n self.W_ = np.mean(X, axis=0)\n self.S_ = np.std(X, axis=0)\n return self\n\n def transform(self, X):\n return (X - self.W_) / self.S_\n\n def onnx_shape_calculator(self):\n def shape_calculator(operator):\n operator.outputs[0].type = operator.inputs[0].type\n return shape_calculator\n\n def to_onnx_operator(self, inputs=None, outputs=('Y', ),\n target_opset=None, **kwargs):\n if inputs is None:\n raise RuntimeError(\"Parameter inputs should contain at least \"\n \"one name.\")\n opv = target_opset or self.op_version\n i0 = self.get_inputs(inputs, 0)\n W = self.W_.astype(np.float32)\n S = self.S_.astype(np.float32)\n return OnnxDiv(OnnxSub(i0, W, op_version=12), S,\n output_names=outputs,\n op_version=opv)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Way 1\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X = np.arange(20).reshape(10, 2)\ntr = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))\ntr.fit(X)\n\nonx = convert_sklearn(\n tr, initial_types=[('X', FloatTensorType((None, X.shape[1])))],\n target_opset=12)\nprint(predict_with_onnxruntime(onx, X))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Way 2\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X = np.arange(20).reshape(10, 2)\ntr = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))\ntr.fit(X)\n\nonx = to_onnx(tr, X.astype(np.float32), target_opset=12)\nprint(predict_with_onnxruntime(onx, X))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Way 3\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X = np.arange(20).reshape(10, 2)\ntr = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))\ntr.fit(X)\n\ntr_mixin = wrap_as_onnx_mixin(tr, target_opset=12)\ntr_mixin.to_onnx(X.astype(np.float32))\n\nprint(predict_with_onnxruntime(onx, X))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Way 4\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - 
"source": [ - "X = np.arange(20).reshape(10, 2)\ntr = wrap_as_onnx_mixin(\n make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2)),\n target_opset=12)\n\ntr.fit(X)\n\nonx = tr.to_onnx(X.astype(np.float32))\nprint(predict_with_onnxruntime(onx, X))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Display the ONNX graph\n\nFinally, let's see the graph converted with *sklearn-onnx*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer # noqa\npydot_graph = GetPydotGraph(onx.graph, name=onx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline_onnx_mixin.dot\")\n\nimport os # noqa\nos.system('dot -O -Gdpi=300 -Tpng pipeline_onnx_mixin.dot')\n\nimport matplotlib.pyplot as plt # noqa\nimage = plt.imread(\"pipeline_onnx_mixin.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import sklearn # noqa\nprint(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nimport skl2onnx # noqa\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Different ways to convert a model\n\nThis example leverages some code added to implement custom converters\nin an easy way.\n\n## Predict with onnxruntime\n\nSimple function to check the converted model\nworks fine.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import onnxruntime\nimport onnx\nimport numpy\nimport numpy as np\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.cluster import KMeans\nfrom sklearn.pipeline import make_pipeline\nfrom onnxruntime import InferenceSession\nfrom skl2onnx import convert_sklearn, to_onnx, wrap_as_onnx_mixin\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx.algebra.onnx_ops import OnnxSub, OnnxDiv\nfrom skl2onnx.algebra.onnx_operator_mixin import OnnxOperatorMixin\n\n\ndef predict_with_onnxruntime(onx, X):\n sess = InferenceSession(onx.SerializeToString())\n input_name = sess.get_inputs()[0].name\n res = sess.run(None, {input_name: X.astype(np.float32)})\n return res[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simple KMeans\n\nThe first way: :func:`convert_sklearn`.\n\n" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X = np.arange(20).reshape(10, 2)\ntr = KMeans(n_clusters=2)\ntr.fit(X)\n\nonx = convert_sklearn(\n tr, initial_types=[('X', FloatTensorType((None, X.shape[1])))],\n target_opset=12)\nprint(predict_with_onnxruntime(onx, X))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The second way: :func:`to_onnx`: no need to play with\n:class:`FloatTensorType` anymore.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X = np.arange(20).reshape(10, 2)\ntr = KMeans(n_clusters=2)\ntr.fit(X)\n\nonx = to_onnx(tr, X.astype(np.float32), target_opset=12)\nprint(predict_with_onnxruntime(onx, X))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The third way: :func:`wrap_as_onnx_mixin`: wraps\nthe machine learned model into a new class\ninheriting from :class:`OnnxOperatorMixin`.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X = np.arange(20).reshape(10, 2)\ntr = KMeans(n_clusters=2)\ntr.fit(X)\n\ntr_mixin = wrap_as_onnx_mixin(tr, target_opset=12)\n\nonx = tr_mixin.to_onnx(X.astype(np.float32))\nprint(predict_with_onnxruntime(onx, X))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The fourth way: :func:`wrap_as_onnx_mixin`: can be called\nbefore fitting the model.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X = np.arange(20).reshape(10, 2)\ntr = wrap_as_onnx_mixin(KMeans(n_clusters=2),\n target_opset=12)\ntr.fit(X)\n\nonx = tr.to_onnx(X.astype(np.float32))\nprint(predict_with_onnxruntime(onx, X))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Pipeline and a custom object\n\nThis is a simple scaler.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "class CustomOpTransformer(BaseEstimator, TransformerMixin,\n OnnxOperatorMixin):\n\n def __init__(self):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.op_version = 12\n\n def fit(self, X, y=None):\n self.W_ = np.mean(X, axis=0)\n self.S_ = np.std(X, axis=0)\n return self\n\n def transform(self, X):\n return (X - self.W_) / self.S_\n\n def onnx_shape_calculator(self):\n def shape_calculator(operator):\n operator.outputs[0].type = operator.inputs[0].type\n return shape_calculator\n\n def to_onnx_operator(self, inputs=None, outputs=('Y', ),\n target_opset=None, **kwargs):\n if inputs is None:\n raise RuntimeError(\"Parameter inputs should contain at least \"\n \"one name.\")\n opv = target_opset or self.op_version\n i0 = self.get_inputs(inputs, 0)\n W = self.W_.astype(np.float32)\n S = self.S_.astype(np.float32)\n return OnnxDiv(OnnxSub(i0, W, op_version=12), S,\n output_names=outputs,\n op_version=opv)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Way 1\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X = np.arange(20).reshape(10, 2)\ntr = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))\ntr.fit(X)\n\nonx = convert_sklearn(\n tr, initial_types=[('X', FloatTensorType((None, X.shape[1])))],\n 
target_opset=12)\nprint(predict_with_onnxruntime(onx, X))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Way 2\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X = np.arange(20).reshape(10, 2)\ntr = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))\ntr.fit(X)\n\nonx = to_onnx(tr, X.astype(np.float32), target_opset=12)\nprint(predict_with_onnxruntime(onx, X))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Way 3\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X = np.arange(20).reshape(10, 2)\ntr = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))\ntr.fit(X)\n\ntr_mixin = wrap_as_onnx_mixin(tr, target_opset=12)\ntr_mixin.to_onnx(X.astype(np.float32))\n\nprint(predict_with_onnxruntime(onx, X))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Way 4\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X = np.arange(20).reshape(10, 2)\ntr = wrap_as_onnx_mixin(\n make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2)),\n target_opset=12)\n\ntr.fit(X)\n\nonx = tr.to_onnx(X.astype(np.float32))\nprint(predict_with_onnxruntime(onx, X))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Display the ONNX graph\n\nFinally, let's see the graph converted with *sklearn-onnx*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer # noqa\npydot_graph = GetPydotGraph(onx.graph, name=onx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline_onnx_mixin.dot\")\n\nimport os # noqa\nos.system('dot -O -Gdpi=300 -Tpng pipeline_onnx_mixin.dot')\n\nimport matplotlib.pyplot as plt # noqa\nimage = plt.imread(\"pipeline_onnx_mixin.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import sklearn # noqa\nprint(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nimport skl2onnx # noqa\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/5980f7efdb3a0aecdb3cf61762ad6e94/plot_convert_decision_function.ipynb b/_downloads/5980f7efdb3a0aecdb3cf61762ad6e94/plot_convert_decision_function.ipynb index 47627231a..5365dc811 100644 --- 
a/_downloads/5980f7efdb3a0aecdb3cf61762ad6e94/plot_convert_decision_function.ipynb +++ b/_downloads/5980f7efdb3a0aecdb3cf61762ad6e94/plot_convert_decision_function.ipynb @@ -1,108 +1,108 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Probabilities or raw scores\n\nA classifier usually returns a matrix of probabilities.\nBy default, *sklearn-onnx* creates an ONNX graph\nwhich returns probabilities but it may skip that\nstep and return raw scores if the model implements\nthe method *decision_function*. Option ``'raw_scores'``\nis used to change the default behaviour. Let's see\nthat on a simple example.\n\n## Train a model and convert it\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import numpy\nimport sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nimport onnxruntime as rt\nimport onnx\nimport skl2onnx\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn\nfrom sklearn.linear_model import LogisticRegression\n\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nclr = LogisticRegression(max_iter=500)\nclr.fit(X_train, y_train)\nprint(clr)\n\ninitial_type = [('float_input', FloatTensorType([None, 4]))]\nonx = convert_sklearn(clr, initial_types=initial_type,\n target_opset=12)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Output type\n\nLet's confirm the output type of the probabilities\nis a list of dictionaries with onnxruntime.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = rt.InferenceSession(onx.SerializeToString())\nres = sess.run(None, {'float_input': X_test.astype(numpy.float32)})\nprint(\"skl\", clr.predict_proba(X_test[:1]))\nprint(\"onnx\", res[1][:2])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Raw scores and decision_function\n\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "initial_type = [('float_input', FloatTensorType([None, 4]))]\noptions = {id(clr): {'raw_scores': True}}\nonx2 = convert_sklearn(clr, initial_types=initial_type, options=options,\n target_opset=12)\n\nsess2 = rt.InferenceSession(onx2.SerializeToString())\nres2 = sess2.run(None, {'float_input': X_test.astype(numpy.float32)})\nprint(\"skl\", clr.decision_function(X_test[:1]))\nprint(\"onnx\", res2[1][:2])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - 
"mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Probabilities or raw scores\n\nA classifier usually returns a matrix of probabilities.\nBy default, *sklearn-onnx* creates an ONNX graph\nwhich returns probabilities but it may skip that\nstep and return raw scores if the model implements\nthe method *decision_function*. Option ``'raw_scores'``\nis used to change the default behaviour. Let's see\nthat on a simple example.\n\n## Train a model and convert it\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy\nimport sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nimport onnxruntime as rt\nimport onnx\nimport skl2onnx\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn\nfrom sklearn.linear_model import LogisticRegression\n\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nclr = LogisticRegression(max_iter=500)\nclr.fit(X_train, y_train)\nprint(clr)\n\ninitial_type = [('float_input', FloatTensorType([None, 4]))]\nonx = convert_sklearn(clr, initial_types=initial_type,\n target_opset=12)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Output type\n\nLet's confirm the output type of the probabilities\nis a list of dictionaries with onnxruntime.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = rt.InferenceSession(onx.SerializeToString(),\n providers=[\"CPUExecutionProvider\"])\nres = sess.run(None, {'float_input': X_test.astype(numpy.float32)})\nprint(\"skl\", clr.predict_proba(X_test[:1]))\nprint(\"onnx\", res[1][:2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Raw scores and decision_function\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "initial_type = [('float_input', FloatTensorType([None, 4]))]\noptions = {id(clr): {'raw_scores': True}}\nonx2 = convert_sklearn(clr, initial_types=initial_type, options=options,\n target_opset=12)\n\nsess2 = rt.InferenceSession(onx2.SerializeToString(),\n providers=[\"CPUExecutionProvider\"])\nres2 = sess2.run(None, {'float_input': X_test.astype(numpy.float32)})\nprint(\"skl\", clr.decision_function(X_test[:1]))\nprint(\"onnx\", res2[1][:2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + 
"name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/5d173e72e82f531aadc098173f8e31d2/plot_gbegin_dataframe.ipynb b/_downloads/5d173e72e82f531aadc098173f8e31d2/plot_gbegin_dataframe.ipynb index 18cac286f..06e21513e 100644 --- a/_downloads/5d173e72e82f531aadc098173f8e31d2/plot_gbegin_dataframe.ipynb +++ b/_downloads/5d173e72e82f531aadc098173f8e31d2/plot_gbegin_dataframe.ipynb @@ -1,288 +1,288 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Dataframe as an input\n\n.. index:: dataframe\n\nA pipeline usually ingests data as a matrix. It may be converted in a matrix\nif all the data share the same type. But data held in a dataframe\nhave usually multiple types, float, integer or string for categories.\nONNX also supports that case.\n\n## A dataset with categories\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from mlinsights.plotting import pipeline2dot\nimport numpy\nimport pprint\nfrom mlprodict.onnx_conv import guess_schema_from_data\nfrom onnxruntime import InferenceSession\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom mlprodict.onnxrt import OnnxInference\nfrom mlprodict.onnx_conv import to_onnx as to_onnx_ext\nfrom skl2onnx import to_onnx\nfrom pandas import DataFrame\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndata = DataFrame([\n dict(CAT1='a', CAT2='c', num1=0.5, num2=0.6, y=0),\n dict(CAT1='b', CAT2='d', num1=0.4, num2=0.8, y=1),\n dict(CAT1='a', CAT2='d', num1=0.5, num2=0.56, y=0),\n dict(CAT1='a', CAT2='d', num1=0.55, num2=0.56, y=1),\n dict(CAT1='a', CAT2='c', num1=0.35, num2=0.86, y=0),\n dict(CAT1='a', CAT2='c', num1=0.5, num2=0.68, y=1),\n])\n\ncat_cols = ['CAT1', 'CAT2']\ntrain_data = data.drop('y', axis=1)\n\n\ncategorical_transformer = Pipeline([\n ('onehot', OneHotEncoder(sparse=False, handle_unknown='ignore'))])\npreprocessor = ColumnTransformer(\n transformers=[\n ('cat', categorical_transformer, cat_cols)],\n remainder='passthrough')\npipe = Pipeline([('preprocess', preprocessor),\n ('rf', RandomForestClassifier())])\npipe.fit(train_data, data['y'])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Display.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "dot = pipeline2dot(pipe, train_data)\nax = plot_graphviz(dot)\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion to ONNX\n\nFunction *to_onnx* does not handle dataframes.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n onx = to_onnx(pipe, train_data[:1])\nexcept NotImplementedError as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "But it 
possible to use an extended one.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx = to_onnx_ext(\n pipe, train_data[:1],\n options={RandomForestClassifier: {'zipmap': False}})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf = OnnxInference(onx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Prediction with ONNX\n\n*onnxruntime* does not support dataframes.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = InferenceSession(onx.SerializeToString())\ntry:\n sess.run(None, train_data)\nexcept Exception as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's use a shortcut\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf = OnnxInference(onx)\ngot = oinf.run(train_data)\nprint(pipe.predict(train_data))\nprint(got['label'])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And probilities.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(pipe.predict_proba(train_data))\nprint(got['probabilities'])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It looks ok. Let's dig into the details to\ndirectly use *onnxruntime*.\n\n## Unhide conversion logic with a dataframe\n\nA dataframe can be seen as a set of columns with\ndifferent types. 
That's what ONNX should see:\na list of inputs, the input name is the column name,\nthe input type is the column type.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "init = guess_schema_from_data(train_data)\n\npprint.pprint(init)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's use float instead.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "for c in train_data.columns:\n if c not in cat_cols:\n train_data[c] = train_data[c].astype(numpy.float32)\n\ninit = guess_schema_from_data(train_data)\npprint.pprint(init)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's convert with *skl2onnx* only.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx2 = to_onnx(\n pipe, initial_types=init,\n options={RandomForestClassifier: {'zipmap': False}})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's run it with onnxruntime.\nWe need to convert the dataframe into a dictionary\nwhere column names become keys, and column values become\nvalues.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "inputs = {c: train_data[c].values.reshape((-1, 1))\n for c in train_data.columns}\npprint.pprint(inputs)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Inference.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess2 = InferenceSession(onx2.SerializeToString())\n\ngot2 = sess2.run(None, inputs)\n\nprint(pipe.predict(train_data))\nprint(got2[0])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And probilities.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(pipe.predict_proba(train_data))\nprint(got2[1])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Dataframe as an input\n\n.. index:: dataframe\n\nA pipeline usually ingests data as a matrix. It may be converted in a matrix\nif all the data share the same type. 
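(A minimal illustration of that homogeneous case, not part of the original example.)

# Hedged sketch: a dataframe holding a single numeric type becomes a plain matrix.
import numpy
from pandas import DataFrame

df_num = DataFrame(dict(num1=[0.5, 0.4, 0.55], num2=[0.6, 0.8, 0.56]))
X = df_num.to_numpy(dtype=numpy.float32)
print(X.shape, X.dtype)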
But data held in a dataframe\nusually has multiple types: float, integer or string for categories.\nONNX also supports that case.\n\n## A dataset with categories\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from mlinsights.plotting import pipeline2dot\nimport numpy\nimport pprint\nfrom mlprodict.onnx_conv import guess_schema_from_data\nfrom onnxruntime import InferenceSession\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom mlprodict.onnxrt import OnnxInference\nfrom mlprodict.onnx_conv import to_onnx as to_onnx_ext\nfrom skl2onnx import to_onnx\nfrom pandas import DataFrame\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndata = DataFrame([\n dict(CAT1='a', CAT2='c', num1=0.5, num2=0.6, y=0),\n dict(CAT1='b', CAT2='d', num1=0.4, num2=0.8, y=1),\n dict(CAT1='a', CAT2='d', num1=0.5, num2=0.56, y=0),\n dict(CAT1='a', CAT2='d', num1=0.55, num2=0.56, y=1),\n dict(CAT1='a', CAT2='c', num1=0.35, num2=0.86, y=0),\n dict(CAT1='a', CAT2='c', num1=0.5, num2=0.68, y=1),\n])\n\ncat_cols = ['CAT1', 'CAT2']\ntrain_data = data.drop('y', axis=1)\n\n\ncategorical_transformer = Pipeline([\n ('onehot', OneHotEncoder(sparse=False, handle_unknown='ignore'))])\npreprocessor = ColumnTransformer(\n transformers=[\n ('cat', categorical_transformer, cat_cols)],\n remainder='passthrough')\npipe = Pipeline([('preprocess', preprocessor),\n ('rf', RandomForestClassifier())])\npipe.fit(train_data, data['y'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Display.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "dot = pipeline2dot(pipe, train_data)\nax = plot_graphviz(dot)\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion to ONNX\n\nFunction *to_onnx* does not handle dataframes.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n onx = to_onnx(pipe, train_data[:1])\nexcept NotImplementedError as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "But it is possible to use an extended one.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx_ext(\n pipe, train_data[:1],\n options={RandomForestClassifier: {'zipmap': False}})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(onx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prediction with ONNX\n\n*onnxruntime* does not support dataframes.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = InferenceSession(onx.SerializeToString())\ntry:\n sess.run(None, train_data)\nexcept Exception as e:\n print(e)" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's use a shortcut\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(onx)\ngot = oinf.run(train_data)\nprint(pipe.predict(train_data))\nprint(got['label'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And probilities.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(pipe.predict_proba(train_data))\nprint(got['probabilities'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It looks ok. Let's dig into the details to\ndirectly use *onnxruntime*.\n\n## Unhide conversion logic with a dataframe\n\nA dataframe can be seen as a set of columns with\ndifferent types. That's what ONNX should see:\na list of inputs, the input name is the column name,\nthe input type is the column type.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "init = guess_schema_from_data(train_data)\n\npprint.pprint(init)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's use float instead.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "for c in train_data.columns:\n if c not in cat_cols:\n train_data[c] = train_data[c].astype(numpy.float32)\n\ninit = guess_schema_from_data(train_data)\npprint.pprint(init)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's convert with *skl2onnx* only.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx2 = to_onnx(\n pipe, initial_types=init,\n options={RandomForestClassifier: {'zipmap': False}})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's run it with onnxruntime.\nWe need to convert the dataframe into a dictionary\nwhere column names become keys, and column values become\nvalues.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "inputs = {c: train_data[c].values.reshape((-1, 1))\n for c in train_data.columns}\npprint.pprint(inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Inference.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess2 = InferenceSession(onx2.SerializeToString())\n\ngot2 = sess2.run(None, inputs)\n\nprint(pipe.predict(train_data))\nprint(got2[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And probilities.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(pipe.predict_proba(train_data))\nprint(got2[1])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + 
"nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/62cac3b48a279dd295d232342c9a5a08/plot_nmf.ipynb b/_downloads/62cac3b48a279dd295d232342c9a5a08/plot_nmf.ipynb index 7a8a1ed80..a37077c30 100644 --- a/_downloads/62cac3b48a279dd295d232342c9a5a08/plot_nmf.ipynb +++ b/_downloads/62cac3b48a279dd295d232342c9a5a08/plot_nmf.ipynb @@ -1,144 +1,144 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Custom Operator for NMF Decomposition\n\n`NMF `_ factorizes an input matrix\ninto two matrices *W, H* of rank *k* so that $WH \\sim M$`.\n$M=(m_{ij})$ may be a binary matrix where *i* is a user\nand *j* a product he bought. The prediction\nfunction depends on whether or not the user needs a\nrecommandation for an existing user or a new user.\nThis example addresses the first case.\n\nThe second case is more complex as it theoretically\nrequires the estimation of a new matrix *W* with a\ngradient descent.\n\n## Building a simple model\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import os\nimport skl2onnx\nimport onnxruntime\nimport sklearn\nfrom sklearn.decomposition import NMF\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnx\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxArrayFeatureExtractor, OnnxMul, OnnxReduceSum)\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom onnxruntime import InferenceSession\n\n\nmat = np.array([[1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0],\n [1, 0, 0, 0], [1, 0, 0, 0]], dtype=np.float64)\nmat[:mat.shape[1], :] += np.identity(mat.shape[1])\n\nmod = NMF(n_components=2)\nW = mod.fit_transform(mat)\nH = mod.components_\npred = mod.inverse_transform(W)\n\nprint(\"original predictions\")\nexp = []\nfor i in range(mat.shape[0]):\n for j in range(mat.shape[1]):\n exp.append((i, j, pred[i, j]))\n\nprint(exp)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's rewrite the prediction in a way it is closer\nto the function we need to convert into ONNX.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def predict(W, H, row_index, col_index):\n return np.dot(W[row_index, :], H[:, col_index])\n\n\ngot = []\nfor i in range(mat.shape[0]):\n for j in range(mat.shape[1]):\n got.append((i, j, predict(W, H, i, j)))\n\nprint(got)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion into ONNX\n\nThere is no implemented converter for\n`NMF `_ as the function we plan\nto convert is not transformer or a predictor.\nThe following converter does not need to be registered,\nit just creates an ONNX graph equivalent to function\n*predict* implemented above.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def nmf_to_onnx(W, H, op_version=12):\n \"\"\"\n The function converts a NMF described by matrices\n *W*, *H* (*WH* approximate training data *M*).\n into a function which takes two indices *(i, j)*\n and returns the predictions for it. 
It assumes\n these indices applies on the training data.\n \"\"\"\n col = OnnxArrayFeatureExtractor(H, 'col')\n row = OnnxArrayFeatureExtractor(W.T, 'row')\n dot = OnnxMul(col, row, op_version=op_version)\n res = OnnxReduceSum(dot, output_names=\"rec\", op_version=op_version)\n indices_type = np.array([0], dtype=np.int64)\n onx = res.to_onnx(inputs={'col': indices_type,\n 'row': indices_type},\n outputs=[('rec', FloatTensorType((None, 1)))],\n target_opset=op_version)\n return onx\n\n\nmodel_onnx = nmf_to_onnx(W.astype(np.float32),\n H.astype(np.float32))\nprint(model_onnx)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's compute prediction with it.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = InferenceSession(model_onnx.SerializeToString())\n\n\ndef predict_onnx(sess, row_indices, col_indices):\n res = sess.run(None,\n {'col': col_indices,\n 'row': row_indices})\n return res\n\n\nonnx_preds = []\nfor i in range(mat.shape[0]):\n for j in range(mat.shape[1]):\n row_indices = np.array([i], dtype=np.int64)\n col_indices = np.array([j], dtype=np.int64)\n pred = predict_onnx(sess, row_indices, col_indices)[0]\n onnx_preds.append((i, j, pred[0, 0]))\n\nprint(onnx_preds)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The ONNX graph looks like the following.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name,\n rankdir=\"TB\", node_producer=GetOpNodeProducer(\"docstring\"))\npydot_graph.write_dot(\"graph_nmf.dot\")\nos.system('dot -O -Tpng graph_nmf.dot')\nimage = plt.imread(\"graph_nmf.dot.png\")\nplt.imshow(image)\nplt.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", np.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Custom Operator for NMF Decomposition\n\n[NMF](https://scikit-learn.org/stable/modules/generated/\nsklearn.decomposition.NMF.html) factorizes an input matrix\ninto two matrices *W, H* of rank *k* so that $WH \\sim M$`.\n$M=(m_{ij})$ may be a binary matrix where *i* is a user\nand *j* a product he bought. 
The prediction\nfunction depends on whether the user needs a\nrecommendation for an existing user or a new user.\nThis example addresses the first case.\n\nThe second case is more complex as it theoretically\nrequires the estimation of a new matrix *W* with a\ngradient descent.\n\n## Building a simple model\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import os\nimport skl2onnx\nimport onnxruntime\nimport sklearn\nfrom sklearn.decomposition import NMF\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnx\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxArrayFeatureExtractor, OnnxMul, OnnxReduceSum)\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom onnxruntime import InferenceSession\n\n\nmat = np.array([[1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0],\n [1, 0, 0, 0], [1, 0, 0, 0]], dtype=np.float64)\nmat[:mat.shape[1], :] += np.identity(mat.shape[1])\n\nmod = NMF(n_components=2)\nW = mod.fit_transform(mat)\nH = mod.components_\npred = mod.inverse_transform(W)\n\nprint(\"original predictions\")\nexp = []\nfor i in range(mat.shape[0]):\n for j in range(mat.shape[1]):\n exp.append((i, j, pred[i, j]))\n\nprint(exp)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's rewrite the prediction in a way that is closer\nto the function we need to convert into ONNX.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def predict(W, H, row_index, col_index):\n return np.dot(W[row_index, :], H[:, col_index])\n\n\ngot = []\nfor i in range(mat.shape[0]):\n for j in range(mat.shape[1]):\n got.append((i, j, predict(W, H, i, j)))\n\nprint(got)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion into ONNX\n\nThere is no implemented converter for\n[NMF](https://scikit-learn.org/stable/modules/generated/\nsklearn.decomposition.NMF.html) as the function we plan\nto convert is not a transformer or a predictor.\nThe following converter does not need to be registered,\nit just creates an ONNX graph equivalent to the function\n*predict* implemented above.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def nmf_to_onnx(W, H, op_version=12):\n \"\"\"\n The function converts an NMF described by matrices\n *W*, *H* (*WH* approximates training data *M*)\n into a function which takes two indices *(i, j)*\n and returns the predictions for it. 
It assumes\n these indices applies on the training data.\n \"\"\"\n col = OnnxArrayFeatureExtractor(H, 'col')\n row = OnnxArrayFeatureExtractor(W.T, 'row')\n dot = OnnxMul(col, row, op_version=op_version)\n res = OnnxReduceSum(dot, output_names=\"rec\", op_version=op_version)\n indices_type = np.array([0], dtype=np.int64)\n onx = res.to_onnx(inputs={'col': indices_type,\n 'row': indices_type},\n outputs=[('rec', FloatTensorType((None, 1)))],\n target_opset=op_version)\n return onx\n\n\nmodel_onnx = nmf_to_onnx(W.astype(np.float32),\n H.astype(np.float32))\nprint(model_onnx)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's compute prediction with it.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = InferenceSession(model_onnx.SerializeToString())\n\n\ndef predict_onnx(sess, row_indices, col_indices):\n res = sess.run(None,\n {'col': col_indices,\n 'row': row_indices})\n return res\n\n\nonnx_preds = []\nfor i in range(mat.shape[0]):\n for j in range(mat.shape[1]):\n row_indices = np.array([i], dtype=np.int64)\n col_indices = np.array([j], dtype=np.int64)\n pred = predict_onnx(sess, row_indices, col_indices)[0]\n onnx_preds.append((i, j, pred[0, 0]))\n\nprint(onnx_preds)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The ONNX graph looks like the following.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name,\n rankdir=\"TB\", node_producer=GetOpNodeProducer(\"docstring\"))\npydot_graph.write_dot(\"graph_nmf.dot\")\nos.system('dot -O -Tpng graph_nmf.dot')\nimage = plt.imread(\"graph_nmf.dot.png\")\nplt.imshow(image)\nplt.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", np.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/679ac573c5cfc9776ba46bf588a9740f/plot_black_op.ipynb b/_downloads/679ac573c5cfc9776ba46bf588a9740f/plot_black_op.ipynb index bc28401b9..3f07f33b4 100644 --- a/_downloads/679ac573c5cfc9776ba46bf588a9740f/plot_black_op.ipynb +++ b/_downloads/679ac573c5cfc9776ba46bf588a9740f/plot_black_op.ipynb @@ -1,187 +1,187 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Convert a model with a reduced list of operators\n\nSome runtime dedicated to onnx do not implement all the\noperators and a converted model may not run if 
one of them\nis missing from the list of available operators.\nSome converters may convert a model in different ways\nif the users wants to blacklist some operators.\n\n## GaussianMixture\n\nThe first converter to change its behaviour depending on a black list\nof operators is for model *GaussianMixture*.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import onnxruntime\nimport onnx\nimport numpy\nimport os\nfrom timeit import timeit\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nfrom onnxruntime import InferenceSession\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom skl2onnx import to_onnx\n\ndata = load_iris()\nX_train, X_test = train_test_split(data.data)\nmodel = GaussianMixture()\nmodel.fit(X_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Default conversion\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = to_onnx(\n model, X_train[:1].astype(np.float32),\n options={id(model): {'score_samples': True}},\n target_opset=12)\nsess = InferenceSession(model_onnx.SerializeToString())\n\nxt = X_test[:5].astype(np.float32)\nprint(model.score_samples(xt))\nprint(sess.run(None, {'X': xt})[2])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Display the ONNX graph.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"mixture.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng mixture.dot')\n\nimage = plt.imread(\"mixture.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion without ReduceLogSumExp\n\nParameter *black_op* is used to tell the converter\nnot to use this operator. 
Let's see what the converter\nproduces in that case.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx2 = to_onnx(\n model, X_train[:1].astype(np.float32),\n options={id(model): {'score_samples': True}},\n black_op={'ReduceLogSumExp'},\n target_opset=12)\nsess2 = InferenceSession(model_onnx2.SerializeToString())\n\nxt = X_test[:5].astype(np.float32)\nprint(model.score_samples(xt))\nprint(sess2.run(None, {'X': xt})[2])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Display the ONNX graph.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n model_onnx2.graph, name=model_onnx2.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"mixture2.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng mixture2.dot')\n\nimage = plt.imread(\"mixture2.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Processing time\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(timeit(stmt=\"sess.run(None, {'X': xt})\",\n number=10000, globals={'sess': sess, 'xt': xt}))\n\nprint(timeit(stmt=\"sess2.run(None, {'X': xt})\",\n number=10000, globals={'sess2': sess2, 'xt': xt}))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The model using ReduceLogSumExp is much faster.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## If the converter cannot convert without...\n\nMany converters do not consider the white and black lists\nof operators. 
If a converter fails to convert without using\na blacklisted operator (or only whitelisted operators),\n*skl2onnx* raises an error.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n to_onnx(\n model, X_train[:1].astype(np.float32),\n options={id(model): {'score_samples': True}},\n black_op={'ReduceLogSumExp', 'Add'},\n target_opset=12)\nexcept RuntimeError as e:\n print('Error:', e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import sklearn # noqa\nprint(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nimport skl2onnx # noqa\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Convert a model with a reduced list of operators\n\nSome runtime dedicated to onnx do not implement all the\noperators and a converted model may not run if one of them\nis missing from the list of available operators.\nSome converters may convert a model in different ways\nif the users wants to blacklist some operators.\n\n## GaussianMixture\n\nThe first converter to change its behaviour depending on a black list\nof operators is for model *GaussianMixture*.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import onnxruntime\nimport onnx\nimport numpy\nimport os\nfrom timeit import timeit\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nfrom onnxruntime import InferenceSession\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom skl2onnx import to_onnx\n\ndata = load_iris()\nX_train, X_test = train_test_split(data.data)\nmodel = GaussianMixture()\nmodel.fit(X_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Default conversion\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = to_onnx(\n model, X_train[:1].astype(np.float32),\n options={id(model): {'score_samples': True}},\n target_opset=12)\nsess = InferenceSession(model_onnx.SerializeToString(),\n providers=[\"CPUExecutionProvider\"])\n\nxt = X_test[:5].astype(np.float32)\nprint(model.score_samples(xt))\nprint(sess.run(None, {'X': xt})[2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Display the ONNX graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": 
null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"mixture.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng mixture.dot')\n\nimage = plt.imread(\"mixture.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion without ReduceLogSumExp\n\nParameter *black_op* is used to tell the converter\nnot to use this operator. Let's see what the converter\nproduces in that case.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx2 = to_onnx(\n model, X_train[:1].astype(np.float32),\n options={id(model): {'score_samples': True}},\n black_op={'ReduceLogSumExp'},\n target_opset=12)\nsess2 = InferenceSession(model_onnx2.SerializeToString(),\n providers=[\"CPUExecutionProvider\"])\n\nxt = X_test[:5].astype(np.float32)\nprint(model.score_samples(xt))\nprint(sess2.run(None, {'X': xt})[2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Display the ONNX graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n model_onnx2.graph, name=model_onnx2.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"mixture2.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng mixture2.dot')\n\nimage = plt.imread(\"mixture2.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Processing time\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(timeit(stmt=\"sess.run(None, {'X': xt})\",\n number=10000, globals={'sess': sess, 'xt': xt}))\n\nprint(timeit(stmt=\"sess2.run(None, {'X': xt})\",\n number=10000, globals={'sess2': sess2, 'xt': xt}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The model using ReduceLogSumExp is much faster.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## If the converter cannot convert without...\n\nMany converters do not consider the white and black lists\nof operators. 
If a converter fails to convert without using\na blacklisted operator (or only whitelisted operators),\n*skl2onnx* raises an error.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n to_onnx(\n model, X_train[:1].astype(np.float32),\n options={id(model): {'score_samples': True}},\n black_op={'ReduceLogSumExp', 'Add'},\n target_opset=12)\nexcept RuntimeError as e:\n print('Error:', e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import sklearn # noqa\nprint(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nimport skl2onnx # noqa\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/6977cfd434391e060efd145172d200ae/plot_convert_zipmap.ipynb b/_downloads/6977cfd434391e060efd145172d200ae/plot_convert_zipmap.ipynb index 7b6d92f92..a63d8fab9 100644 --- a/_downloads/6977cfd434391e060efd145172d200ae/plot_convert_zipmap.ipynb +++ b/_downloads/6977cfd434391e060efd145172d200ae/plot_convert_zipmap.ipynb @@ -1,144 +1,144 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Probabilities as a vector or as a ZipMap\n\nA classifier usually returns a matrix of probabilities.\nBy default, *sklearn-onnx* converts that matrix\ninto a list of dictionaries where each probabily is mapped\nto its class id or name. That mechanism retains the class names.\nThis conversion increases the prediction time and is not\nalways needed. 
Let's see how to deactivate this behaviour\non the Iris example.\n\n## Train a model and convert it\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from timeit import repeat\nimport numpy\nimport sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nimport onnxruntime as rt\nimport onnx\nimport skl2onnx\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn\nfrom sklearn.linear_model import LogisticRegression\n\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nclr = LogisticRegression(max_iter=500)\nclr.fit(X_train, y_train)\nprint(clr)\n\ninitial_type = [('float_input', FloatTensorType([None, 4]))]\nonx = convert_sklearn(clr, initial_types=initial_type,\n target_opset=12)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Output type\n\nLet's confirm the output type of the probabilities\nis a list of dictionaries with onnxruntime.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = rt.InferenceSession(onx.SerializeToString())\nres = sess.run(None, {'float_input': X_test.astype(numpy.float32)})\nprint(res[1][:2])\nprint(\"probabilities type:\", type(res[1]))\nprint(\"type for the first observations:\", type(res[1][0]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Without ZipMap\n\nLet's remove the ZipMap operator.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "initial_type = [('float_input', FloatTensorType([None, 4]))]\noptions = {id(clr): {'zipmap': False}}\nonx2 = convert_sklearn(clr, initial_types=initial_type, options=options,\n target_opset=12)\n\nsess2 = rt.InferenceSession(onx2.SerializeToString())\nres2 = sess2.run(None, {'float_input': X_test.astype(numpy.float32)})\nprint(res2[1][:2])\nprint(\"probabilities type:\", type(res2[1]))\nprint(\"type for the first observations:\", type(res2[1][0]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## One output per class\n\nThis options removes the final operator ZipMap and splits\nthe probabilities into columns. 
The final model produces\none output for the label, and one output per class.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "options = {id(clr): {'zipmap': 'columns'}}\nonx3 = convert_sklearn(clr, initial_types=initial_type, options=options,\n target_opset=12)\n\nsess3 = rt.InferenceSession(onx3.SerializeToString())\nres3 = sess3.run(None, {'float_input': X_test.astype(numpy.float32)})\nfor i, out in enumerate(sess3.get_outputs()):\n print(\"output: '{}' shape={} values={}...\".format(\n out.name, res3[i].shape, res3[i][:2]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Let's compare prediction time\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X32 = X_test.astype(numpy.float32)\n\nprint(\"Time with ZipMap:\")\nprint(repeat(lambda: sess.run(None, {'float_input': X32}),\n number=100, repeat=10))\n\nprint(\"Time without ZipMap:\")\nprint(repeat(lambda: sess2.run(None, {'float_input': X32}),\n number=100, repeat=10))\n\nprint(\"Time without ZipMap but with columns:\")\nprint(repeat(lambda: sess3.run(None, {'float_input': X32}),\n number=100, repeat=10))\n\n# The prediction is much faster without ZipMap\n# on this example.\n# The optimisation is even faster when the classes\n# are described with strings and not integers\n# as the final result (list of dictionaries) may copy\n# many times the same information with onnxruntime." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Probabilities as a vector or as a ZipMap\n\nA classifier usually returns a matrix of probabilities.\nBy default, *sklearn-onnx* converts that matrix\ninto a list of dictionaries where each probabily is mapped\nto its class id or name. That mechanism retains the class names.\nThis conversion increases the prediction time and is not\nalways needed. 
Let's see how to deactivate this behaviour\non the Iris example.\n\n## Train a model and convert it\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from timeit import repeat\nimport numpy\nimport sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nimport onnxruntime as rt\nimport onnx\nimport skl2onnx\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn\nfrom sklearn.linear_model import LogisticRegression\n\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nclr = LogisticRegression(max_iter=500)\nclr.fit(X_train, y_train)\nprint(clr)\n\ninitial_type = [('float_input', FloatTensorType([None, 4]))]\nonx = convert_sklearn(clr, initial_types=initial_type,\n target_opset=12)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Output type\n\nLet's confirm the output type of the probabilities\nis a list of dictionaries with onnxruntime.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = rt.InferenceSession(onx.SerializeToString())\nres = sess.run(None, {'float_input': X_test.astype(numpy.float32)})\nprint(res[1][:2])\nprint(\"probabilities type:\", type(res[1]))\nprint(\"type for the first observations:\", type(res[1][0]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Without ZipMap\n\nLet's remove the ZipMap operator.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "initial_type = [('float_input', FloatTensorType([None, 4]))]\noptions = {id(clr): {'zipmap': False}}\nonx2 = convert_sklearn(clr, initial_types=initial_type, options=options,\n target_opset=12)\n\nsess2 = rt.InferenceSession(onx2.SerializeToString())\nres2 = sess2.run(None, {'float_input': X_test.astype(numpy.float32)})\nprint(res2[1][:2])\nprint(\"probabilities type:\", type(res2[1]))\nprint(\"type for the first observations:\", type(res2[1][0]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## One output per class\n\nThis options removes the final operator ZipMap and splits\nthe probabilities into columns. 
The final model produces\none output for the label, and one output per class.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "options = {id(clr): {'zipmap': 'columns'}}\nonx3 = convert_sklearn(clr, initial_types=initial_type, options=options,\n target_opset=12)\n\nsess3 = rt.InferenceSession(onx3.SerializeToString())\nres3 = sess3.run(None, {'float_input': X_test.astype(numpy.float32)})\nfor i, out in enumerate(sess3.get_outputs()):\n print(\"output: '{}' shape={} values={}...\".format(\n out.name, res3[i].shape, res3[i][:2]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Let's compare prediction time\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X32 = X_test.astype(numpy.float32)\n\nprint(\"Time with ZipMap:\")\nprint(repeat(lambda: sess.run(None, {'float_input': X32}),\n number=100, repeat=10))\n\nprint(\"Time without ZipMap:\")\nprint(repeat(lambda: sess2.run(None, {'float_input': X32}),\n number=100, repeat=10))\n\nprint(\"Time without ZipMap but with columns:\")\nprint(repeat(lambda: sess3.run(None, {'float_input': X32}),\n number=100, repeat=10))\n\n# The prediction is much faster without ZipMap\n# on this example.\n# The optimisation is even faster when the classes\n# are described with strings and not integers\n# as the final result (list of dictionaries) may copy\n# many times the same information with onnxruntime." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip b/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip index 75fdde2bc..2385a91aa 100644 Binary files a/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip and b/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip differ diff --git a/_downloads/74a60226f53cdc22e33c085b1b6a932d/plot_complex_pipeline.ipynb b/_downloads/74a60226f53cdc22e33c085b1b6a932d/plot_complex_pipeline.ipynb index 07deb3530..4331e3947 100644 --- a/_downloads/74a60226f53cdc22e33c085b1b6a932d/plot_complex_pipeline.ipynb +++ b/_downloads/74a60226f53cdc22e33c085b1b6a932d/plot_complex_pipeline.ipynb @@ -1,277 +1,277 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Convert a pipeline with ColumnTransformer\n\n*scikit-learn* recently 
shipped\n`ColumnTransformer `_\nwhich lets the user define complex pipeline where each\ncolumn may be preprocessed with a different transformer.\n*sklearn-onnx* still works in this case as shown in Section\n`l-complex-pipeline`.\n\n## Create and train a complex pipeline\n\nWe reuse the pipeline implemented in example\n`Column Transformer with Mixed Types\n`_.\nThere is one change because\n`ONNX-ML Imputer\n`_\ndoes not handle string type. This cannot be part of the final ONNX pipeline\nand must be removed. Look for comment starting with ``---`` below.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import os\nimport pprint\nimport pandas as pd\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\nimport onnx\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nimport skl2onnx\nfrom skl2onnx import convert_sklearn\nfrom skl2onnx.common.data_types import FloatTensorType, StringTensorType\nfrom skl2onnx.common.data_types import Int64TensorType\n\ntitanic_url = ('https://raw.githubusercontent.com/amueller/'\n 'scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv')\ndata = pd.read_csv(titanic_url)\nX = data.drop('survived', axis=1)\ny = data['survived']\nprint(data.dtypes)\n\n# SimpleImputer on string is not available for\n# string in ONNX-ML specifications.\n# So we do it beforehand.\nfor cat in ['embarked', 'sex', 'pclass']:\n X[cat].fillna('missing', inplace=True)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nnumeric_features = ['age', 'fare']\nnumeric_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler())])\n\ncategorical_features = ['embarked', 'sex', 'pclass']\ncategorical_transformer = Pipeline(steps=[\n # --- SimpleImputer is not available for strings in ONNX-ML specifications.\n # ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\n ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n\npreprocessor = ColumnTransformer(\n transformers=[\n ('num', numeric_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features),\n ])\n\nclf = Pipeline(steps=[('preprocessor', preprocessor),\n ('classifier', LogisticRegression(solver='lbfgs'))])\n\n\nclf.fit(X_train, y_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Define the inputs of the ONNX graph\n\n*sklearn-onnx* does not know the features used to train the model\nbut it needs to know which feature has which name.\nWe simply reuse the dataframe column definition.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(X_train.dtypes)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "After conversion.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def convert_dataframe_schema(df, drop=None):\n inputs = []\n for k, v in zip(df.columns, df.dtypes):\n if drop is not 
None and k in drop:\n continue\n if v == 'int64':\n t = Int64TensorType([None, 1])\n elif v == 'float64':\n t = FloatTensorType([None, 1])\n else:\n t = StringTensorType([None, 1])\n inputs.append((k, t))\n return inputs\n\n\ninitial_inputs = convert_dataframe_schema(X_train)\n\npprint.pprint(initial_inputs)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Merging single column into vectors is not\nthe most efficient way to compute the prediction.\nIt could be done before converting the pipeline into a graph.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Convert the pipeline into ONNX\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n model_onnx = convert_sklearn(clf, 'pipeline_titanic', initial_inputs,\n target_opset=12)\nexcept Exception as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Predictions are more efficient if the graph is small.\nThat's why the converter checks that there is no unused input.\nThey need to be removed from the graph inputs.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "to_drop = {'parch', 'sibsp', 'cabin', 'ticket',\n 'name', 'body', 'home.dest', 'boat'}\ninitial_inputs = convert_dataframe_schema(X_train, to_drop)\ntry:\n model_onnx = convert_sklearn(clf, 'pipeline_titanic', initial_inputs,\n target_opset=12)\nexcept Exception as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "*scikit-learn* does implicit conversions when it can.\n*sklearn-onnx* does not. The ONNX version of *OneHotEncoder*\nmust be applied on columns of the same type.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "initial_inputs = convert_dataframe_schema(X_train, to_drop)\n\nmodel_onnx = convert_sklearn(clf, 'pipeline_titanic', initial_inputs,\n target_opset=12)\n\n\n# And save.\nwith open(\"pipeline_titanic.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Compare the predictions\n\nFinal step, we need to ensure the converted model\nproduces the same predictions, labels and probabilities.\nLet's start with *scikit-learn*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"predict\", clf.predict(X_test[:5]))\nprint(\"predict_proba\", clf.predict_proba(X_test[:2]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Predictions with onnxruntime.\nWe need to remove the dropped columns and to change\nthe double vectors into float vectors as *onnxruntime*\ndoes not support double floats.\n*onnxruntime* does not accept *dataframe*.\ninputs must be given as a list of dictionary.\nLast detail, every column was described not really as a vector\nbut as a matrix of one column which explains the last line\nwith the *reshape*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X_test2 = X_test.drop(to_drop, axis=1)\ninputs = {c: X_test2[c].values for c in X_test2.columns}\nfor c in numeric_features:\n inputs[c] = inputs[c].astype(np.float32)\nfor k in inputs:\n 
inputs[k] = inputs[k].reshape((inputs[k].shape[0], 1))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We are ready to run *onnxruntime*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = rt.InferenceSession(\"pipeline_titanic.onnx\")\npred_onx = sess.run(None, inputs)\nprint(\"predict\", pred_onx[0][:5])\nprint(\"predict_proba\", pred_onx[1][:2])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The output of onnxruntime is a list of dictionaries.\nLet's swith to an array but that requires to convert again with\nan additional option zipmap.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = convert_sklearn(clf, 'pipeline_titanic', initial_inputs,\n target_opset=12,\n options={id(clf): {'zipmap': False}})\nwith open(\"pipeline_titanic_nozipmap.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())\n\nsess = rt.InferenceSession(\"pipeline_titanic_nozipmap.onnx\")\npred_onx = sess.run(None, inputs)\nprint(\"predict\", pred_onx[0][:5])\nprint(\"predict_proba\", pred_onx[1][:2])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's check they are the same.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "assert_almost_equal(clf.predict_proba(X_test), pred_onx[1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n## Display the ONNX graph\n\nFinally, let's see the graph converted with *sklearn-onnx*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(model_onnx.graph, name=model_onnx.graph.name,\n rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\",\n color=\"yellow\",\n fillcolor=\"yellow\",\n style=\"filled\"))\npydot_graph.write_dot(\"pipeline_titanic.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline_titanic.dot')\n\nimage = plt.imread(\"pipeline_titanic.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", np.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Convert a pipeline with ColumnTransformer\n\n*scikit-learn* recently 
shipped\n[ColumnTransformer](https://scikit-learn.org/stable/modules/\ngenerated/sklearn.compose.ColumnTransformer.html)\nwhich lets the user define complex pipeline where each\ncolumn may be preprocessed with a different transformer.\n*sklearn-onnx* still works in this case as shown in Section\n`l-complex-pipeline`.\n\n\n## Create and train a complex pipeline\n\nWe reuse the pipeline implemented in example\n[Column Transformer with Mixed Types](https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer_mixed_types.html#sphx-glr-auto-examples-compose-plot-column-transformer-mixed-types-py).\nThere is one change because\n[ONNX-ML Imputer](https://github.com/onnx/onnx/blob/master/docs/\nOperators-ml.md#ai.onnx.ml.Imputer)\ndoes not handle string type. This cannot be part of the final ONNX pipeline\nand must be removed. Look for comment starting with ``---`` below.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import os\nimport pprint\nimport pandas as pd\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\nimport onnx\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nimport skl2onnx\nfrom skl2onnx import convert_sklearn\nfrom skl2onnx.common.data_types import FloatTensorType, StringTensorType\nfrom skl2onnx.common.data_types import Int64TensorType\n\ntitanic_url = ('https://raw.githubusercontent.com/amueller/'\n 'scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv')\ndata = pd.read_csv(titanic_url)\nX = data.drop('survived', axis=1)\ny = data['survived']\nprint(data.dtypes)\n\n# SimpleImputer on string is not available for\n# string in ONNX-ML specifications.\n# So we do it beforehand.\nfor cat in ['embarked', 'sex', 'pclass']:\n X[cat].fillna('missing', inplace=True)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nnumeric_features = ['age', 'fare']\nnumeric_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler())])\n\ncategorical_features = ['embarked', 'sex', 'pclass']\ncategorical_transformer = Pipeline(steps=[\n # --- SimpleImputer is not available for strings in ONNX-ML specifications.\n # ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\n ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n\npreprocessor = ColumnTransformer(\n transformers=[\n ('num', numeric_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features),\n ])\n\nclf = Pipeline(steps=[('preprocessor', preprocessor),\n ('classifier', LogisticRegression(solver='lbfgs'))])\n\n\nclf.fit(X_train, y_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define the inputs of the ONNX graph\n\n*sklearn-onnx* does not know the features used to train the model\nbut it needs to know which feature has which name.\nWe simply reuse the dataframe column definition.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(X_train.dtypes)" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "After conversion.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def convert_dataframe_schema(df, drop=None):\n inputs = []\n for k, v in zip(df.columns, df.dtypes):\n if drop is not None and k in drop:\n continue\n if v == 'int64':\n t = Int64TensorType([None, 1])\n elif v == 'float64':\n t = FloatTensorType([None, 1])\n else:\n t = StringTensorType([None, 1])\n inputs.append((k, t))\n return inputs\n\n\ninitial_inputs = convert_dataframe_schema(X_train)\n\npprint.pprint(initial_inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Merging single column into vectors is not\nthe most efficient way to compute the prediction.\nIt could be done before converting the pipeline into a graph.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert the pipeline into ONNX\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n model_onnx = convert_sklearn(clf, 'pipeline_titanic', initial_inputs,\n target_opset=12)\nexcept Exception as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Predictions are more efficient if the graph is small.\nThat's why the converter checks that there is no unused input.\nThey need to be removed from the graph inputs.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "to_drop = {'parch', 'sibsp', 'cabin', 'ticket',\n 'name', 'body', 'home.dest', 'boat'}\ninitial_inputs = convert_dataframe_schema(X_train, to_drop)\ntry:\n model_onnx = convert_sklearn(clf, 'pipeline_titanic', initial_inputs,\n target_opset=12)\nexcept Exception as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*scikit-learn* does implicit conversions when it can.\n*sklearn-onnx* does not. 
The ONNX version of *OneHotEncoder*\nmust be applied on columns of the same type.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "initial_inputs = convert_dataframe_schema(X_train, to_drop)\n\nmodel_onnx = convert_sklearn(clf, 'pipeline_titanic', initial_inputs,\n target_opset=12)\n\n\n# And save.\nwith open(\"pipeline_titanic.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Compare the predictions\n\nFinal step, we need to ensure the converted model\nproduces the same predictions, labels and probabilities.\nLet's start with *scikit-learn*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"predict\", clf.predict(X_test[:5]))\nprint(\"predict_proba\", clf.predict_proba(X_test[:2]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Predictions with onnxruntime.\nWe need to remove the dropped columns and to change\nthe double vectors into float vectors as *onnxruntime*\ndoes not support double floats.\n*onnxruntime* does not accept *dataframe*.\ninputs must be given as a list of dictionary.\nLast detail, every column was described not really as a vector\nbut as a matrix of one column which explains the last line\nwith the *reshape*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X_test2 = X_test.drop(to_drop, axis=1)\ninputs = {c: X_test2[c].values for c in X_test2.columns}\nfor c in numeric_features:\n inputs[c] = inputs[c].astype(np.float32)\nfor k in inputs:\n inputs[k] = inputs[k].reshape((inputs[k].shape[0], 1))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We are ready to run *onnxruntime*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = rt.InferenceSession(\"pipeline_titanic.onnx\",\n providers=[\"CPUExecutionProvider\"])\npred_onx = sess.run(None, inputs)\nprint(\"predict\", pred_onx[0][:5])\nprint(\"predict_proba\", pred_onx[1][:2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The output of onnxruntime is a list of dictionaries.\nLet's swith to an array but that requires to convert again with\nan additional option zipmap.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = convert_sklearn(clf, 'pipeline_titanic', initial_inputs,\n target_opset=12,\n options={id(clf): {'zipmap': False}})\nwith open(\"pipeline_titanic_nozipmap.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())\n\nsess = rt.InferenceSession(\"pipeline_titanic_nozipmap.onnx\",\n providers=[\"CPUExecutionProvider\"])\npred_onx = sess.run(None, inputs)\nprint(\"predict\", pred_onx[0][:5])\nprint(\"predict_proba\", pred_onx[1][:2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's check they are the same.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "assert_almost_equal(clf.predict_proba(X_test), pred_onx[1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n## Display the ONNX 
graph\n\nFinally, let's see the graph converted with *sklearn-onnx*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(model_onnx.graph, name=model_onnx.graph.name,\n rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\",\n color=\"yellow\",\n fillcolor=\"yellow\",\n style=\"filled\"))\npydot_graph.write_dot(\"pipeline_titanic.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline_titanic.dot')\n\nimage = plt.imread(\"pipeline_titanic.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", np.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/792667019b9330f59baed50d2d5e2b70/plot_cast_transformer.ipynb b/_downloads/792667019b9330f59baed50d2d5e2b70/plot_cast_transformer.ipynb index 427421d6b..76fc85f8f 100644 --- a/_downloads/792667019b9330f59baed50d2d5e2b70/plot_cast_transformer.ipynb +++ b/_downloads/792667019b9330f59baed50d2d5e2b70/plot_cast_transformer.ipynb @@ -1,198 +1,198 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Discrepencies with StandardScaler\n\nA `StandardScaler `_ does\na very basic scaling. The conversion in ONNX assumes that\n``(x / y)`` is equivalent to ``x * ( 1 / y)`` but that's not\ntrue with float or double (see\n`Will the compiler optimize division into multiplication\n`_).\nEven if the difference is small,\nit may introduce discrepencies if the next step is\na decision tree. One small difference and the decision\nfollows another path in the tree. 
Let's see how to solve\nthat issue.\n\n## An example with fails\n\nThis is not a typical example, it is build to make it fails\nbased on the assumption ``(x / y)`` is usually different from\n``x * ( 1 / y)`` on a computer.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import onnxruntime\nimport onnx\nimport numpy\nimport os\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nfrom onnxruntime import InferenceSession\nfrom sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.tree import DecisionTreeRegressor\nfrom skl2onnx.sklapi import CastTransformer\nfrom skl2onnx import to_onnx" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The weird data.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X, y = make_regression(10000, 10, random_state=3)\nX_train, X_test, y_train, _ = train_test_split(\n X, y, random_state=3)\nXi_train, yi_train = X_train.copy(), y_train.copy()\nXi_test = X_test.copy()\nfor i in range(X.shape[1]):\n Xi_train[:, i] = (Xi_train[:, i] * math.pi * 2 ** i).astype(\n np.int64)\n Xi_test[:, i] = (Xi_test[:, i] * math.pi * 2 ** i).astype(\n np.int64)\nmax_depth = 10\nXi_test = Xi_test.astype(np.float32)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "A simple model.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model1 = Pipeline([\n ('scaler', StandardScaler()),\n ('dt', DecisionTreeRegressor(max_depth=max_depth))\n])\nmodel1.fit(Xi_train, yi_train)\nexp1 = model1.predict(Xi_test)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Conversion into ONNX.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx1 = to_onnx(model1, X_train[:1].astype(np.float32))\nsess1 = InferenceSession(onx1.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And the maximum difference.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "got1 = sess1.run(None, {'X': Xi_test})[0]\n\n\ndef maxdiff(a1, a2):\n d = np.abs(a1.ravel() - a2.ravel())\n return d.max()\n\n\nmd1 = maxdiff(exp1, got1)\nprint(md1)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The graph.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n onx1.graph, name=onx1.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"cast1.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng cast1.dot')\n\nimage = plt.imread(\"cast1.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## New pipeline\n\nFixing the conversion requires to replace ``(x * (1 / y)``\nby ``(x / y)`` and this 
division must happen in double.\nBy default, the *sklearn-onnx* assumes every\ncomputer should happen in float. `ONNX 1.7 specifications\n`_\ndoes not support double scaling (input and output does,\nbut not the parameters). The solution needs to\nchange the conversion (remove node Scaler by using option\n`'div'`) and to use double by inserting an explicit\nCast.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model2 = Pipeline([\n ('cast64', CastTransformer(dtype=np.float64)),\n ('scaler', StandardScaler()),\n ('cast', CastTransformer()),\n ('dt', DecisionTreeRegressor(max_depth=max_depth))\n])\n\nmodel2.fit(Xi_train, yi_train)\nexp2 = model2.predict(Xi_test)\n\nonx2 = to_onnx(model2, X_train[:1].astype(np.float32),\n options={StandardScaler: {'div': 'div_cast'}})\n\nsess2 = InferenceSession(onx2.SerializeToString())\ngot2 = sess2.run(None, {'X': Xi_test})[0]\nmd2 = maxdiff(exp2, got2)\n\nprint(md2)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The graph.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n onx2.graph, name=onx2.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"cast2.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng cast2.dot')\n\nimage = plt.imread(\"cast2.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import sklearn # noqa\nprint(\"numpy:\", np.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nimport skl2onnx # noqa\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Discrepencies with StandardScaler\n\nA [StandardScaler](https://scikit-learn.org/stable/modules/\ngenerated/sklearn.preprocessing.StandardScaler.html) does\na very basic scaling. The conversion in ONNX assumes that\n``(x / y)`` is equivalent to ``x * ( 1 / y)`` but that's not\ntrue with float or double (see\n[Will the compiler optimize division into multiplication](https://stackoverflow.com/questions/35506226/\nwill-the-compiler-optimize-division-into-multiplication)).\nEven if the difference is small,\nit may introduce discrepencies if the next step is\na decision tree. One small difference and the decision\nfollows another path in the tree. 
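The effect is easy to reproduce with plain *numpy*, independently of any model (a standalone sketch; the random values are arbitrary):

```python
# Standalone check (values are arbitrary): dividing by y and multiplying by the
# precomputed inverse 1/y round differently in single precision for a fraction
# of the inputs, even though both are mathematically identical.
import numpy as np

rng = np.random.RandomState(0)
x = rng.uniform(1, 1000, 100000).astype(np.float32)
y = rng.uniform(1, 1000, 100000).astype(np.float32)
direct = x / y
via_inverse = x * (np.float32(1) / y)
print("differing results:", int((direct != via_inverse).sum()), "/", x.size)
```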
Let's see how to solve\nthat issue.\n\n## An example with fails\n\nThis is not a typical example, it is build to make it fails\nbased on the assumption ``(x / y)`` is usually different from\n``x * ( 1 / y)`` on a computer.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import onnxruntime\nimport onnx\nimport numpy\nimport os\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nfrom onnxruntime import InferenceSession\nfrom sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.tree import DecisionTreeRegressor\nfrom skl2onnx.sklapi import CastTransformer\nfrom skl2onnx import to_onnx" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The weird data.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X, y = make_regression(10000, 10, random_state=3)\nX_train, X_test, y_train, _ = train_test_split(\n X, y, random_state=3)\nXi_train, yi_train = X_train.copy(), y_train.copy()\nXi_test = X_test.copy()\nfor i in range(X.shape[1]):\n Xi_train[:, i] = (Xi_train[:, i] * math.pi * 2 ** i).astype(\n np.int64)\n Xi_test[:, i] = (Xi_test[:, i] * math.pi * 2 ** i).astype(\n np.int64)\nmax_depth = 10\nXi_test = Xi_test.astype(np.float32)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A simple model.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model1 = Pipeline([\n ('scaler', StandardScaler()),\n ('dt', DecisionTreeRegressor(max_depth=max_depth))\n])\nmodel1.fit(Xi_train, yi_train)\nexp1 = model1.predict(Xi_test)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Conversion into ONNX.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx1 = to_onnx(model1, X_train[:1].astype(np.float32),\n target_opset=15)\nsess1 = InferenceSession(onx1.SerializeToString(),\n providers=[\"CPUExecutionProvider\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And the maximum difference.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "got1 = sess1.run(None, {'X': Xi_test})[0]\n\n\ndef maxdiff(a1, a2):\n d = np.abs(a1.ravel() - a2.ravel())\n return d.max()\n\n\nmd1 = maxdiff(exp1, got1)\nprint(md1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n onx1.graph, name=onx1.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"cast1.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng cast1.dot')\n\nimage = plt.imread(\"cast1.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## New pipeline\n\nFixing the conversion 
requires to replace ``(x * (1 / y)``\nby ``(x / y)`` and this division must happen in double.\nBy default, the *sklearn-onnx* assumes every\ncomputer should happen in float. [ONNX 1.7 specifications](https://github.com/onnx/onnx/blob/master/docs/\nOperators-ml.md#ai.onnx.ml.Scaler)\ndoes not support double scaling (input and output does,\nbut not the parameters). The solution needs to\nchange the conversion (remove node Scaler by using option\n`'div'`) and to use double by inserting an explicit\nCast.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model2 = Pipeline([\n ('cast64', CastTransformer(dtype=np.float64)),\n ('scaler', StandardScaler()),\n ('cast', CastTransformer()),\n ('dt', DecisionTreeRegressor(max_depth=max_depth))\n])\n\nmodel2.fit(Xi_train, yi_train)\nexp2 = model2.predict(Xi_test)\n\nonx2 = to_onnx(model2, X_train[:1].astype(np.float32),\n options={StandardScaler: {'div': 'div_cast'}},\n target_opset=15)\n\nsess2 = InferenceSession(onx2.SerializeToString(),\n providers=[\"CPUExecutionProvider\"])\ngot2 = sess2.run(None, {'X': Xi_test})[0]\nmd2 = maxdiff(exp2, got2)\n\nprint(md2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n onx2.graph, name=onx2.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"cast2.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng cast2.dot')\n\nimage = plt.imread(\"cast2.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import sklearn # noqa\nprint(\"numpy:\", np.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nimport skl2onnx # noqa\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/7c4689d9934d62706ce0c56c0537a320/plot_catwoe_transformer.ipynb b/_downloads/7c4689d9934d62706ce0c56c0537a320/plot_catwoe_transformer.ipynb index 1afc2084c..c143e230a 100644 --- a/_downloads/7c4689d9934d62706ce0c56c0537a320/plot_catwoe_transformer.ipynb +++ b/_downloads/7c4689d9934d62706ce0c56c0537a320/plot_catwoe_transformer.ipynb @@ -1,180 +1,180 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Converter for WOEEncoder from categorical_encoder\n\n`WOEEncoder `_\nis a transformer implemented in 
`categorical_encoder\n`_ and as such,\nany converter would not be included in *sklearn-onnx* which only\nimplements converters for *scikit-learn* models. Anyhow, this\nexample demonstrates how to implement a custom converter\nfor *WOEEncoder*. This code is not fully tested for all possible\ncases the original encoder can handle.\n\n.. index:: WOE, WOEEncoder\n\n## A simple example\n\nLet's take the `Iris dataset\n`_.\nEvery feature is converter into integer.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import numpy as np\nfrom onnxruntime import InferenceSession\nfrom sklearn.datasets import load_iris\nfrom sklearn.preprocessing import OrdinalEncoder as SklOrdinalEncoder\nfrom category_encoders import WOEEncoder, OrdinalEncoder\nfrom skl2onnx import update_registered_converter, to_onnx, get_model_alias\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx.common.utils import check_input_and_output_numbers\nfrom skl2onnx.algebra.onnx_ops import OnnxCast\nfrom skl2onnx.algebra.onnx_operator import OnnxSubEstimator\nfrom skl2onnx.sklapi import WOETransformer\nimport skl2onnx.sklapi.register # noqa\n\ndata = load_iris()\nX, y = data.data, data.target\nX = X.astype(np.int64)[:, :2]\ny = (y == 2).astype(np.int64)\n\nwoe = WOEEncoder(cols=[0]).fit(X, y)\nprint(woe.transform(X[:5]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's look into the trained parameters of the model.\nIt appears that WOEEncoder uses an OrdinalEncoder\nbut not the one from scikit-learn. We need to add a\nconverter for this model tool.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"encoder\", type(woe.ordinal_encoder), woe.ordinal_encoder)\nprint(\"mapping\", woe.mapping)\nprint(\"encoder.mapping\", woe.ordinal_encoder.mapping)\nprint(\"encoder.cols\", woe.ordinal_encoder.cols)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom converter for OrdinalEncoder\n\nWe start from example `l-plot-custom-converter`\nand then write the conversion.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def ordenc_to_sklearn(op_mapping):\n \"Converts OrdinalEncoder mapping to scikit-learn OrdinalEncoder.\"\n cats = []\n for column_map in op_mapping:\n col = column_map['col']\n while len(cats) <= col:\n cats.append(None)\n mapping = column_map['mapping']\n res = []\n for i in range(mapping.shape[0]):\n if np.isnan(mapping.index[i]):\n continue\n ind = mapping.iloc[i]\n while len(res) <= ind:\n res.append(0)\n res[ind] = mapping.index[i]\n cats[col] = np.array(res, dtype=np.int64)\n\n skl_ord = SklOrdinalEncoder(categories=cats, dtype=np.int64)\n skl_ord.categories_ = cats\n return skl_ord\n\n\ndef ordinal_encoder_shape_calculator(operator):\n check_input_and_output_numbers(\n operator, input_count_range=1, output_count_range=1)\n input_type = operator.inputs[0].type.__class__\n input_dim = operator.inputs[0].get_first_dimension()\n shape = operator.inputs[0].type.shape\n second_dim = None if len(shape) != 2 else shape[1]\n output_type = input_type([input_dim, second_dim])\n operator.outputs[0].type = output_type\n\n\ndef ordinal_encoder_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n X = operator.inputs[0]\n\n 
skl_ord = ordenc_to_sklearn(op.mapping)\n cat = OnnxSubEstimator(skl_ord, X, op_version=opv,\n output_names=operator.outputs[:1])\n cat.add_to(scope, container)\n\n\nupdate_registered_converter(\n OrdinalEncoder, \"CategoricalEncoderOrdinalEncoder\",\n ordinal_encoder_shape_calculator,\n ordinal_encoder_converter)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's compute the output one a short example.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "enc = OrdinalEncoder(cols=[0, 1])\nenc.fit(X)\nprint(enc.transform(X[:5]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's check the ONNX conversion produces the same results.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ord_onx = to_onnx(enc, X[:1], target_opset=14)\nsess = InferenceSession(ord_onx.SerializeToString())\nprint(sess.run(None, {'X': X[:5]})[0])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "That works.\n\n## Custom converter for WOEEncoder\n\nWe start from example `l-plot-custom-converter`\nand then write the conversion.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def woeenc_to_sklearn(op_mapping):\n \"Converts WOEEncoder mapping to scikit-learn OrdinalEncoder.\"\n cats = []\n ws = []\n for column_map in op_mapping.items():\n col = column_map[0]\n while len(cats) <= col:\n cats.append('passthrough')\n ws.append(None)\n mapping = column_map[1]\n intervals = []\n weights = []\n for i in range(mapping.shape[0]):\n ind = mapping.index[i]\n if ind < 0:\n continue\n intervals.append((float(ind - 1), float(ind), False, True))\n weights.append(mapping.iloc[i])\n cats[col] = intervals\n ws[col] = weights\n\n skl = WOETransformer(intervals=cats, weights=ws, onehot=False)\n skl.fit(None)\n return skl\n\n\ndef woe_encoder_parser(\n scope, model, inputs, custom_parsers=None):\n if len(inputs) != 1:\n raise RuntimeError(\n \"Unexpected number of inputs: %d != 1.\" % len(inputs))\n if inputs[0].type is None:\n raise RuntimeError(\n \"Unexpected type: %r.\" % (inputs[0], ))\n alias = get_model_alias(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n this_operator.inputs.append(inputs[0])\n this_operator.outputs.append(\n scope.declare_local_variable('catwoe', FloatTensorType()))\n return this_operator.outputs\n\n\ndef woe_encoder_shape_calculator(operator):\n check_input_and_output_numbers(\n operator, input_count_range=1, output_count_range=1)\n input_dim = operator.inputs[0].get_first_dimension()\n shape = operator.inputs[0].type.shape\n second_dim = None if len(shape) != 2 else shape[1]\n output_type = FloatTensorType([input_dim, second_dim])\n operator.outputs[0].type = output_type\n\n\ndef woe_encoder_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n X = operator.inputs[0]\n\n sub = OnnxSubEstimator(op.ordinal_encoder, X,\n op_version=opv)\n cast = OnnxCast(sub, op_version=opv, to=np.float32)\n skl_ord = woeenc_to_sklearn(op.mapping)\n cat = OnnxSubEstimator(skl_ord, cast, op_version=opv,\n output_names=operator.outputs[:1],\n input_types=[FloatTensorType()])\n cat.add_to(scope, container)\n\n\nupdate_registered_converter(\n WOEEncoder, \"CategoricalEncoderWOEEncoder\",\n 
woe_encoder_shape_calculator,\n woe_encoder_converter,\n parser=woe_encoder_parser)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's compute the output one a short example.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "woe = WOEEncoder(cols=[0, 1]).fit(X, y)\nprint(woe.transform(X[:5]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's check the ONNX conversion produces the same results.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "woe_onx = to_onnx(woe, X[:1], target_opset=14)\nsess = InferenceSession(woe_onx.SerializeToString())\nprint(sess.run(None, {'X': X[:5]})[0])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Converter for WOEEncoder from categorical_encoder\n\n[WOEEncoder](https://contrib.scikit-learn.org/category_encoders/woe.html)\nis a transformer implemented in [categorical_encoder](https://contrib.scikit-learn.org/category_encoders/) and as such,\nany converter would not be included in *sklearn-onnx* which only\nimplements converters for *scikit-learn* models. Anyhow, this\nexample demonstrates how to implement a custom converter\nfor *WOEEncoder*. This code is not fully tested for all possible\ncases the original encoder can handle.\n\n.. index:: WOE, WOEEncoder\n\n## A simple example\n\nLet's take the [Iris dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_iris.html).\nEvery feature is converter into integer.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nfrom onnxruntime import InferenceSession\nfrom sklearn.datasets import load_iris\nfrom sklearn.preprocessing import OrdinalEncoder as SklOrdinalEncoder\nfrom category_encoders import WOEEncoder, OrdinalEncoder\nfrom skl2onnx import update_registered_converter, to_onnx, get_model_alias\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx.common.utils import check_input_and_output_numbers\nfrom skl2onnx.algebra.onnx_ops import OnnxCast\nfrom skl2onnx.algebra.onnx_operator import OnnxSubEstimator\nfrom skl2onnx.sklapi import WOETransformer\nimport skl2onnx.sklapi.register # noqa\n\ndata = load_iris()\nX, y = data.data, data.target\nX = X.astype(np.int64)[:, :2]\ny = (y == 2).astype(np.int64)\n\nwoe = WOEEncoder(cols=[0]).fit(X, y)\nprint(woe.transform(X[:5]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's look into the trained parameters of the model.\nIt appears that WOEEncoder uses an OrdinalEncoder\nbut not the one from scikit-learn. 
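Before writing anything, it helps to see what happens without a dedicated converter (a hedged sketch added for illustration; the exact exception type and message depend on the installed skl2onnx version):

```python
# Sketch: converting an estimator skl2onnx does not know yet raises an error
# pointing at the missing shape calculator / converter registration.
try:
    to_onnx(woe, X[:1], target_opset=14)
except Exception as e:
    print(type(e).__name__, ":", e)
```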
We need to add a\nconverter for this model tool.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"encoder\", type(woe.ordinal_encoder), woe.ordinal_encoder)\nprint(\"mapping\", woe.mapping)\nprint(\"encoder.mapping\", woe.ordinal_encoder.mapping)\nprint(\"encoder.cols\", woe.ordinal_encoder.cols)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom converter for OrdinalEncoder\n\nWe start from example `l-plot-custom-converter`\nand then write the conversion.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def ordenc_to_sklearn(op_mapping):\n \"Converts OrdinalEncoder mapping to scikit-learn OrdinalEncoder.\"\n cats = []\n for column_map in op_mapping:\n col = column_map['col']\n while len(cats) <= col:\n cats.append(None)\n mapping = column_map['mapping']\n res = []\n for i in range(mapping.shape[0]):\n if np.isnan(mapping.index[i]):\n continue\n ind = mapping.iloc[i]\n while len(res) <= ind:\n res.append(0)\n res[ind] = mapping.index[i]\n cats[col] = np.array(res, dtype=np.int64)\n\n skl_ord = SklOrdinalEncoder(categories=cats, dtype=np.int64)\n skl_ord.categories_ = cats\n return skl_ord\n\n\ndef ordinal_encoder_shape_calculator(operator):\n check_input_and_output_numbers(\n operator, input_count_range=1, output_count_range=1)\n input_type = operator.inputs[0].type.__class__\n input_dim = operator.inputs[0].get_first_dimension()\n shape = operator.inputs[0].type.shape\n second_dim = None if len(shape) != 2 else shape[1]\n output_type = input_type([input_dim, second_dim])\n operator.outputs[0].type = output_type\n\n\ndef ordinal_encoder_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n X = operator.inputs[0]\n\n skl_ord = ordenc_to_sklearn(op.mapping)\n cat = OnnxSubEstimator(skl_ord, X, op_version=opv,\n output_names=operator.outputs[:1])\n cat.add_to(scope, container)\n\n\nupdate_registered_converter(\n OrdinalEncoder, \"CategoricalEncoderOrdinalEncoder\",\n ordinal_encoder_shape_calculator,\n ordinal_encoder_converter)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's compute the output one a short example.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "enc = OrdinalEncoder(cols=[0, 1])\nenc.fit(X)\nprint(enc.transform(X[:5]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's check the ONNX conversion produces the same results.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "ord_onx = to_onnx(enc, X[:1], target_opset=14)\nsess = InferenceSession(ord_onx.SerializeToString())\nprint(sess.run(None, {'X': X[:5]})[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That works.\n\n## Custom converter for WOEEncoder\n\nWe start from example `l-plot-custom-converter`\nand then write the conversion.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def woeenc_to_sklearn(op_mapping):\n \"Converts WOEEncoder mapping to scikit-learn OrdinalEncoder.\"\n cats = []\n ws = []\n for column_map in op_mapping.items():\n col = column_map[0]\n while 
len(cats) <= col:\n cats.append('passthrough')\n ws.append(None)\n mapping = column_map[1]\n intervals = []\n weights = []\n for i in range(mapping.shape[0]):\n ind = mapping.index[i]\n if ind < 0:\n continue\n intervals.append((float(ind - 1), float(ind), False, True))\n weights.append(mapping.iloc[i])\n cats[col] = intervals\n ws[col] = weights\n\n skl = WOETransformer(intervals=cats, weights=ws, onehot=False)\n skl.fit(None)\n return skl\n\n\ndef woe_encoder_parser(\n scope, model, inputs, custom_parsers=None):\n if len(inputs) != 1:\n raise RuntimeError(\n \"Unexpected number of inputs: %d != 1.\" % len(inputs))\n if inputs[0].type is None:\n raise RuntimeError(\n \"Unexpected type: %r.\" % (inputs[0], ))\n alias = get_model_alias(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n this_operator.inputs.append(inputs[0])\n this_operator.outputs.append(\n scope.declare_local_variable('catwoe', FloatTensorType()))\n return this_operator.outputs\n\n\ndef woe_encoder_shape_calculator(operator):\n check_input_and_output_numbers(\n operator, input_count_range=1, output_count_range=1)\n input_dim = operator.inputs[0].get_first_dimension()\n shape = operator.inputs[0].type.shape\n second_dim = None if len(shape) != 2 else shape[1]\n output_type = FloatTensorType([input_dim, second_dim])\n operator.outputs[0].type = output_type\n\n\ndef woe_encoder_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n X = operator.inputs[0]\n\n sub = OnnxSubEstimator(op.ordinal_encoder, X,\n op_version=opv)\n cast = OnnxCast(sub, op_version=opv, to=np.float32)\n skl_ord = woeenc_to_sklearn(op.mapping)\n cat = OnnxSubEstimator(skl_ord, cast, op_version=opv,\n output_names=operator.outputs[:1],\n input_types=[FloatTensorType()])\n cat.add_to(scope, container)\n\n\nupdate_registered_converter(\n WOEEncoder, \"CategoricalEncoderWOEEncoder\",\n woe_encoder_shape_calculator,\n woe_encoder_converter,\n parser=woe_encoder_parser)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's compute the output one a short example.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "woe = WOEEncoder(cols=[0, 1]).fit(X, y)\nprint(woe.transform(X[:5]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's check the ONNX conversion produces the same results.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "woe_onx = to_onnx(woe, X[:1], target_opset=14)\nsess = InferenceSession(woe_onx.SerializeToString())\nprint(sess.run(None, {'X': X[:5]})[0])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/7dbb342391c27be404a8aefcfc5a9af6/plot_jcustom_syntax.ipynb b/_downloads/7dbb342391c27be404a8aefcfc5a9af6/plot_jcustom_syntax.ipynb index 8ce9dc5ae..98605f87c 100644 --- a/_downloads/7dbb342391c27be404a8aefcfc5a9af6/plot_jcustom_syntax.ipynb +++ 
b/_downloads/7dbb342391c27be404a8aefcfc5a9af6/plot_jcustom_syntax.ipynb @@ -1,133 +1,133 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Two ways to implement a converter\n\n.. index:: syntax\n\nThere are two ways to write a converter. The first one\nis less verbose and easier to understand\n(see `k_means.py `_).\nThe other is very verbose (see `ada_boost.py `_\nfor an example).\n\nThe first way is used in `l-plot-custom-converter`.\nThis one demonstrates the second way which is usually the one\nused in other converter library. It is more verbose.\n\n\n## Custom model\n\nIt basically copies what is in example\n``l-plot-custom-converter`.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from skl2onnx.common.data_types import guess_proto_type\nfrom onnxconverter_common.onnx_ops import apply_sub\nfrom onnxruntime import InferenceSession\nfrom skl2onnx import update_registered_converter\nfrom skl2onnx import to_onnx\nimport numpy\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\n\n\nclass DecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.mean_`: average\n * `self.coef_`: square root of the coveriance matrix\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n if sample_weights is not None:\n raise NotImplementedError(\n \"sample_weights != None is not implemented.\")\n self.mean_ = numpy.mean(X, axis=0, keepdims=True)\n X = X - self.mean_\n V = X.T @ X / X.shape[0]\n if self.alpha != 0:\n V += numpy.identity(V.shape[0]) * self.alpha\n L, P = numpy.linalg.eig(V)\n Linv = L ** (-0.5)\n diag = numpy.diag(Linv)\n root = P @ diag @ P.transpose()\n self.coef_ = root\n return self\n\n def transform(self, X):\n return (X - self.mean_) @ self.coef_\n\n\ndata = load_iris()\nX = data.data\n\ndec = DecorrelateTransformer()\ndec.fit(X)\npred = dec.transform(X[:5])\nprint(pred)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion into ONNX\n\nThe shape calculator does not change.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def decorrelate_transformer_shape_calculator(operator):\n op = operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n # The shape may be unknown. 
*get_first_dimension*\n # returns the appropriate value, None in most cases\n # meaning the transformer can process any batch of observations.\n input_dim = operator.inputs[0].get_first_dimension()\n output_type = input_type([input_dim, op.coef_.shape[1]])\n operator.outputs[0].type = output_type" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The converter is different.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def decorrelate_transformer_converter(scope, operator, container):\n op = operator.raw_operator\n out = operator.outputs\n\n # We retrieve the unique input.\n X = operator.inputs[0]\n\n # In most case, computation happen in floats.\n # But it might be with double. ONNX is very strict\n # about types, every constant should have the same\n # type as the input.\n proto_dtype = guess_proto_type(X.type)\n\n mean_name = scope.get_unique_variable_name('mean')\n container.add_initializer(mean_name, proto_dtype,\n op.mean_.shape, list(op.mean_.ravel()))\n\n coef_name = scope.get_unique_variable_name('coef')\n container.add_initializer(coef_name, proto_dtype,\n op.coef_.shape, list(op.coef_.ravel()))\n\n op_name = scope.get_unique_operator_name('sub')\n sub_name = scope.get_unique_variable_name('sub')\n # This function is defined in package onnxconverter_common.\n # Most common operators can be added to the graph with\n # these functions. It handles the case when specifications\n # changed accross opsets (a parameter becomes an input\n # for example).\n apply_sub(scope, [X.full_name, mean_name], sub_name, container,\n operator_name=op_name)\n\n op_name = scope.get_unique_operator_name('matmul')\n container.add_node(\n 'MatMul', [sub_name, coef_name],\n out[0].full_name, name=op_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We need to let *skl2onnx* know about the new converter.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(\n DecorrelateTransformer, \"SklearnDecorrelateTransformer\",\n decorrelate_transformer_shape_calculator,\n decorrelate_transformer_converter)\n\n\nonx = to_onnx(dec, X.astype(numpy.float32))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\ngot = sess.run(None, {'X': X.astype(numpy.float32)})[0]\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, got))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's check it works as well with double.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx = to_onnx(dec, X.astype(numpy.float64))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float64))\ngot = sess.run(None, {'X': X.astype(numpy.float64)})[0]\nprint(diff(exp, got))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The differences are smaller with double as expected.\n\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": 
"python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Two ways to implement a converter\n\n.. index:: syntax\n\nThere are two ways to write a converter. The first one\nis less verbose and easier to understand\n(see [k_means.py](https://github.com/onnx/sklearn-onnx/blob/\nmaster/skl2onnx/operator_converters/k_means.py)).\nThe other is very verbose (see [ada_boost.py](https://github.com/onnx/\nsklearn-onnx/blob/master/skl2onnx/operator_converters/ada_boost.py)\nfor an example).\n\nThe first way is used in `l-plot-custom-converter`.\nThis one demonstrates the second way which is usually the one\nused in other converter library. It is more verbose.\n\n## Custom model\n\nIt basically copies what is in example\n``l-plot-custom-converter`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from skl2onnx.common.data_types import guess_proto_type\nfrom onnxconverter_common.onnx_ops import apply_sub\nfrom onnxruntime import InferenceSession\nfrom skl2onnx import update_registered_converter\nfrom skl2onnx import to_onnx\nimport numpy\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\n\n\nclass DecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.mean_`: average\n * `self.coef_`: square root of the coveriance matrix\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n if sample_weights is not None:\n raise NotImplementedError(\n \"sample_weights != None is not implemented.\")\n self.mean_ = numpy.mean(X, axis=0, keepdims=True)\n X = X - self.mean_\n V = X.T @ X / X.shape[0]\n if self.alpha != 0:\n V += numpy.identity(V.shape[0]) * self.alpha\n L, P = numpy.linalg.eig(V)\n Linv = L ** (-0.5)\n diag = numpy.diag(Linv)\n root = P @ diag @ P.transpose()\n self.coef_ = root\n return self\n\n def transform(self, X):\n return (X - self.mean_) @ self.coef_\n\n\ndata = load_iris()\nX = data.data\n\ndec = DecorrelateTransformer()\ndec.fit(X)\npred = dec.transform(X[:5])\nprint(pred)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion into ONNX\n\nThe shape calculator does not change.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def decorrelate_transformer_shape_calculator(operator):\n op = operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n # The shape may be unknown. 
*get_first_dimension*\n # returns the appropriate value, None in most cases\n # meaning the transformer can process any batch of observations.\n input_dim = operator.inputs[0].get_first_dimension()\n output_type = input_type([input_dim, op.coef_.shape[1]])\n operator.outputs[0].type = output_type" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The converter is different.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def decorrelate_transformer_converter(scope, operator, container):\n op = operator.raw_operator\n out = operator.outputs\n\n # We retrieve the unique input.\n X = operator.inputs[0]\n\n # In most case, computation happen in floats.\n # But it might be with double. ONNX is very strict\n # about types, every constant should have the same\n # type as the input.\n proto_dtype = guess_proto_type(X.type)\n\n mean_name = scope.get_unique_variable_name('mean')\n container.add_initializer(mean_name, proto_dtype,\n op.mean_.shape, list(op.mean_.ravel()))\n\n coef_name = scope.get_unique_variable_name('coef')\n container.add_initializer(coef_name, proto_dtype,\n op.coef_.shape, list(op.coef_.ravel()))\n\n op_name = scope.get_unique_operator_name('sub')\n sub_name = scope.get_unique_variable_name('sub')\n # This function is defined in package onnxconverter_common.\n # Most common operators can be added to the graph with\n # these functions. It handles the case when specifications\n # changed accross opsets (a parameter becomes an input\n # for example).\n apply_sub(scope, [X.full_name, mean_name], sub_name, container,\n operator_name=op_name)\n\n op_name = scope.get_unique_operator_name('matmul')\n container.add_node(\n 'MatMul', [sub_name, coef_name],\n out[0].full_name, name=op_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We need to let *skl2onnx* know about the new converter.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(\n DecorrelateTransformer, \"SklearnDecorrelateTransformer\",\n decorrelate_transformer_shape_calculator,\n decorrelate_transformer_converter)\n\n\nonx = to_onnx(dec, X.astype(numpy.float32))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\ngot = sess.run(None, {'X': X.astype(numpy.float32)})[0]\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, got))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's check it works as well with double.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(dec, X.astype(numpy.float64))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float64))\ngot = sess.run(None, {'X': X.astype(numpy.float64)})[0]\nprint(diff(exp, got))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The differences are smaller with double as expected.\n\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": 
"python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/7e51ee96c7d38b86e7516908766735ec/plot_custom_parser_alternative.py b/_downloads/7e51ee96c7d38b86e7516908766735ec/plot_custom_parser_alternative.py index 6a90dd095..617d4b838 100644 --- a/_downloads/7e51ee96c7d38b86e7516908766735ec/plot_custom_parser_alternative.py +++ b/_downloads/7e51ee96c7d38b86e7516908766735ec/plot_custom_parser_alternative.py @@ -19,9 +19,6 @@ above a given threshold. That's implemented in method *validate*. -.. contents:: - :local: - Iris and scoring ++++++++++++++++ @@ -45,7 +42,7 @@ from skl2onnx.proto import onnx_proto from skl2onnx.common.data_types import FloatTensorType, Int64TensorType from skl2onnx.algebra.onnx_ops import ( - OnnxGreater, OnnxCast, OnnxReduceMax, OnnxIdentity + OnnxGreater, OnnxCast, OnnxReduceMaxApi18, OnnxIdentity ) from skl2onnx.algebra.onnx_operator import OnnxSubEstimator import matplotlib.pyplot as plt @@ -150,7 +147,7 @@ def validator_classifier_converter(scope, operator, container): onnx_op = OnnxSubEstimator(model, input0, op_version=opv, options={'zipmap': False}) - rmax = OnnxReduceMax(onnx_op[1], axes=[1], keepdims=0, op_version=opv) + rmax = OnnxReduceMaxApi18(onnx_op[1], axes=[1], keepdims=0, op_version=opv) great = OnnxGreater(rmax, np.array([op.threshold], dtype=np.float32), op_version=opv) valid = OnnxCast(great, to=onnx_proto.TensorProto.INT64, diff --git a/_downloads/8a6f42013434836e3c442883f37dbd75/plot_icustom_converter.ipynb b/_downloads/8a6f42013434836e3c442883f37dbd75/plot_icustom_converter.ipynb index 7ab52f7ad..f6e8ce33e 100644 --- a/_downloads/8a6f42013434836e3c442883f37dbd75/plot_icustom_converter.ipynb +++ b/_downloads/8a6f42013434836e3c442883f37dbd75/plot_icustom_converter.ipynb @@ -1,187 +1,187 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Implement a new converter\n\n.. index:: custom converter\n\nBy default, :epkg:`sklearn-onnx` assumes that a classifier\nhas two outputs (label and probabilities), a regressor\nhas one output (prediction), a transform has one output\n(the transformed data). This example assumes the model to\nconvert is one of them. In that case, a new converter requires\nin fact two functions:\n\n* a shape calculator: it defines the output shape and type\n based on the model and input type,\n* a converter: it actually builds an ONNX graph equivalent\n to the prediction function to be converted.\n\nThis example implements both components for a new model.\n\n## Custom model\n\nLet's implement a simple custom model using\n:epkg:`scikit-learn` API. The model is preprocessing\nwhich decorrelates correlated random variables.\nIf *X* is a matrix of features, $V=\\frac{1}{n}X'X$\nis the covariance matrix. 
We compute $X V^{1/2}$.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from mlprodict.onnxrt import OnnxInference\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nimport pickle\nfrom io import BytesIO\nimport numpy\nfrom numpy.testing import assert_almost_equal\nfrom onnxruntime import InferenceSession\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\nfrom skl2onnx.common.data_types import guess_numpy_type\nfrom skl2onnx import to_onnx\nfrom skl2onnx import update_registered_converter\nfrom skl2onnx.algebra.onnx_ops import OnnxMatMul, OnnxSub\n\n\nclass DecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.mean_`: average\n * `self.coef_`: square root of the coveriance matrix\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n if sample_weights is not None:\n raise NotImplementedError(\n \"sample_weights != None is not implemented.\")\n self.mean_ = numpy.mean(X, axis=0, keepdims=True)\n X = X - self.mean_\n V = X.T @ X / X.shape[0]\n if self.alpha != 0:\n V += numpy.identity(V.shape[0]) * self.alpha\n L, P = numpy.linalg.eig(V)\n Linv = L ** (-0.5)\n diag = numpy.diag(Linv)\n root = P @ diag @ P.transpose()\n self.coef_ = root\n return self\n\n def transform(self, X):\n return (X - self.mean_) @ self.coef_\n\n\ndef test_decorrelate_transformer():\n data = load_iris()\n X = data.data\n\n dec = DecorrelateTransformer()\n dec.fit(X)\n pred = dec.transform(X)\n cov = pred.T @ pred\n cov /= cov[0, 0]\n assert_almost_equal(numpy.identity(4), cov)\n\n dec = DecorrelateTransformer(alpha=1e-10)\n dec.fit(X)\n pred = dec.transform(X)\n cov = pred.T @ pred\n cov /= cov[0, 0]\n assert_almost_equal(numpy.identity(4), cov)\n\n st = BytesIO()\n pickle.dump(dec, st)\n dec2 = pickle.load(BytesIO(st.getvalue()))\n assert_almost_equal(dec.mean_, dec2.mean_)\n assert_almost_equal(dec.coef_, dec2.coef_)\n assert id(dec.mean_) != id(dec2.mean_)\n assert id(dec.coef_) != id(dec2.coef_)\n\n\ntest_decorrelate_transformer()\n\ndata = load_iris()\nX = data.data\n\ndec = DecorrelateTransformer()\ndec.fit(X)\npred = dec.transform(X[:5])\nprint(pred)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Trained coefficients.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(dec.mean_)\nprint(dec.coef_)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion into ONNX\n\nLet's try to convert it and see what happens.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n to_onnx(dec, X.astype(numpy.float32))\nexcept Exception as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This error means there is no converter associated\nto *DecorrelateTransformer*. Let's implement it.\nIt requires the two following\nfunctions, a shape calculator and a converter\nwith the same signature as below.\nFirst the shape calculator. 
We retrieve the input type\nadd tells the output type has the same type,\nthe same number of rows and a specific number of columns.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def decorrelate_transformer_shape_calculator(operator):\n op = operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n # The shape may be unknown. *get_first_dimension*\n # returns the appropriate value, None in most cases\n # meaning the transformer can process any batch of observations.\n input_dim = operator.inputs[0].get_first_dimension()\n output_type = input_type([input_dim, op.coef_.shape[1]])\n operator.outputs[0].type = output_type" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The converter. One thing we need to pay attention to\nis the target opset. This information is important\nto make sure that every node is defined following the\nspecifications of that opset.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def decorrelate_transformer_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n # We retrieve the unique input.\n X = operator.inputs[0]\n\n # In most case, computation happen in floats.\n # But it might be with double. ONNX is very strict\n # about types, every constant should have the same\n # type as the input.\n dtype = guess_numpy_type(X.type)\n\n # We tell in ONNX language how to compute the unique output.\n # op_version=opv tells which opset is requested\n Y = OnnxMatMul(\n OnnxSub(X, op.mean_.astype(dtype), op_version=opv),\n op.coef_.astype(dtype),\n op_version=opv, output_names=out[:1])\n Y.add_to(scope, container)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We need to let *skl2onnx* know about the new converter.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(\n DecorrelateTransformer, \"SklearnDecorrelateTransformer\",\n decorrelate_transformer_shape_calculator,\n decorrelate_transformer_converter)\n\n\nonx = to_onnx(dec, X.astype(numpy.float32))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\ngot = sess.run(None, {'X': X.astype(numpy.float32)})[0]\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, got))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's check it works as well with double.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx = to_onnx(dec, X.astype(numpy.float64))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float64))\ngot = sess.run(None, {'X': X.astype(numpy.float64)})[0]\nprint(diff(exp, got))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The differences are smaller with double as expected.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Final graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf = OnnxInference(onx)\nax = 
plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Implement a new converter\n\n.. index:: custom converter\n\nBy default, :epkg:`sklearn-onnx` assumes that a classifier\nhas two outputs (label and probabilities), a regressor\nhas one output (prediction), a transform has one output\n(the transformed data). This example assumes the model to\nconvert is one of them. In that case, a new converter requires\nin fact two functions:\n\n* a shape calculator: it defines the output shape and type\n based on the model and input type,\n* a converter: it actually builds an ONNX graph equivalent\n to the prediction function to be converted.\n\nThis example implements both components for a new model.\n\n## Custom model\n\nLet's implement a simple custom model using\n:epkg:`scikit-learn` API. The model is preprocessing\nwhich decorrelates correlated random variables.\nIf *X* is a matrix of features, $V=\\frac{1}{n}X'X$\nis the covariance matrix. We compute $X V^{1/2}$.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from mlprodict.onnxrt import OnnxInference\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nimport pickle\nfrom io import BytesIO\nimport numpy\nfrom numpy.testing import assert_almost_equal\nfrom onnxruntime import InferenceSession\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\nfrom skl2onnx.common.data_types import guess_numpy_type\nfrom skl2onnx import to_onnx\nfrom skl2onnx import update_registered_converter\nfrom skl2onnx.algebra.onnx_ops import OnnxMatMul, OnnxSub\n\n\nclass DecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.mean_`: average\n * `self.coef_`: square root of the coveriance matrix\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n if sample_weights is not None:\n raise NotImplementedError(\n \"sample_weights != None is not implemented.\")\n self.mean_ = numpy.mean(X, axis=0, keepdims=True)\n X = X - self.mean_\n V = X.T @ X / X.shape[0]\n if self.alpha != 0:\n V += numpy.identity(V.shape[0]) * self.alpha\n L, P = numpy.linalg.eig(V)\n Linv = L ** (-0.5)\n diag = numpy.diag(Linv)\n root = P @ diag @ P.transpose()\n self.coef_ = root\n return self\n\n def transform(self, X):\n return (X - self.mean_) @ self.coef_\n\n\ndef test_decorrelate_transformer():\n data = load_iris()\n X = data.data\n\n dec = DecorrelateTransformer()\n dec.fit(X)\n pred = dec.transform(X)\n cov = pred.T @ pred\n cov /= cov[0, 0]\n 
assert_almost_equal(numpy.identity(4), cov)\n\n dec = DecorrelateTransformer(alpha=1e-10)\n dec.fit(X)\n pred = dec.transform(X)\n cov = pred.T @ pred\n cov /= cov[0, 0]\n assert_almost_equal(numpy.identity(4), cov)\n\n st = BytesIO()\n pickle.dump(dec, st)\n dec2 = pickle.load(BytesIO(st.getvalue()))\n assert_almost_equal(dec.mean_, dec2.mean_)\n assert_almost_equal(dec.coef_, dec2.coef_)\n assert id(dec.mean_) != id(dec2.mean_)\n assert id(dec.coef_) != id(dec2.coef_)\n\n\ntest_decorrelate_transformer()\n\ndata = load_iris()\nX = data.data\n\ndec = DecorrelateTransformer()\ndec.fit(X)\npred = dec.transform(X[:5])\nprint(pred)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Trained coefficients.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(dec.mean_)\nprint(dec.coef_)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion into ONNX\n\nLet's try to convert it and see what happens.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n to_onnx(dec, X.astype(numpy.float32))\nexcept Exception as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This error means there is no converter associated\nto *DecorrelateTransformer*. Let's implement it.\nIt requires the two following\nfunctions, a shape calculator and a converter\nwith the same signature as below.\nFirst the shape calculator. We retrieve the input type\nadd tells the output type has the same type,\nthe same number of rows and a specific number of columns.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def decorrelate_transformer_shape_calculator(operator):\n op = operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n # The shape may be unknown. *get_first_dimension*\n # returns the appropriate value, None in most cases\n # meaning the transformer can process any batch of observations.\n input_dim = operator.inputs[0].get_first_dimension()\n output_type = input_type([input_dim, op.coef_.shape[1]])\n operator.outputs[0].type = output_type" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The converter. One thing we need to pay attention to\nis the target opset. This information is important\nto make sure that every node is defined following the\nspecifications of that opset.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def decorrelate_transformer_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n # We retrieve the unique input.\n X = operator.inputs[0]\n\n # In most case, computation happen in floats.\n # But it might be with double. 
ONNX is very strict\n # about types, every constant should have the same\n # type as the input.\n dtype = guess_numpy_type(X.type)\n\n # We tell in ONNX language how to compute the unique output.\n # op_version=opv tells which opset is requested\n Y = OnnxMatMul(\n OnnxSub(X, op.mean_.astype(dtype), op_version=opv),\n op.coef_.astype(dtype),\n op_version=opv, output_names=out[:1])\n Y.add_to(scope, container)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We need to let *skl2onnx* know about the new converter.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(\n DecorrelateTransformer, \"SklearnDecorrelateTransformer\",\n decorrelate_transformer_shape_calculator,\n decorrelate_transformer_converter)\n\n\nonx = to_onnx(dec, X.astype(numpy.float32))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\ngot = sess.run(None, {'X': X.astype(numpy.float32)})[0]\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, got))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's check it works as well with double.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(dec, X.astype(numpy.float64))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float64))\ngot = sess.run(None, {'X': X.astype(numpy.float64)})[0]\nprint(diff(exp, got))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The differences are smaller with double as expected.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(onx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/8a92d2a146ad695f5076e82a21f7caef/plot_qextend_onnxruntime.ipynb b/_downloads/8a92d2a146ad695f5076e82a21f7caef/plot_qextend_onnxruntime.ipynb index 7f51e6317..0a62dcc09 100644 --- a/_downloads/8a92d2a146ad695f5076e82a21f7caef/plot_qextend_onnxruntime.ipynb +++ b/_downloads/8a92d2a146ad695f5076e82a21f7caef/plot_qextend_onnxruntime.ipynb @@ -1,43 +1,43 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Fast runtime with onnxruntime\n\n:epkg:`ONNX operators` does not contain operator\nfrom :epkg:`numpy`. There is no operator for\n`solve `_ but this one\nis needed to implement the prediction function\nof model :epkg:`NMF`. 
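A converter could declare such a missing operation as a node in a custom domain. A minimal sketch, where the 'Solve' operator type and the 'com.example' domain are hypothetical and no runtime implements them, only shows how the node could be declared with onnx.helper:

from onnx import TensorProto, helper

A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [None, None])
B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [None, None])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [None, None])

# hypothetical operator in a custom domain, not part of the ONNX standard
node = helper.make_node('Solve', ['A', 'B'], ['Y'], domain='com.example')
graph = helper.make_graph([node], 'solve_graph', [A, B], [Y])
model = helper.make_model(
    graph, opset_imports=[helper.make_opsetid('', 15),
                          helper.make_opsetid('com.example', 1)])
print(model.graph.node[0])

Executing such a model still requires a runtime which knows the custom domain, which is the difficulty discussed here.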
The converter can be written\nincluding a new ONNX operator but then it requires a\nruntime for it to be tested. Example\n`l-extend-python-runtime` shows how to do that\nwith :epkg:`mlprodict`. Doing the same with\n:epkg:`onnxruntime` is more ambitious as it requires\nC++...\n\n*to be continued*\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Fast runtime with onnxruntime\n\n:epkg:`ONNX operators` does not contain operator\nfrom :epkg:`numpy`. There is no operator for\n[solve](https://numpy.org/doc/stable/reference/\ngenerated/numpy.linalg.solve.html) but this one\nis needed to implement the prediction function\nof model :epkg:`NMF`. The converter can be written\nincluding a new ONNX operator but then it requires a\nruntime for it to be tested. Example\n`l-extend-python-runtime` shows how to do that\nwith :epkg:`mlprodict`. Doing the same with\n:epkg:`onnxruntime` is more ambitious as it requires\nC++...\n\n*to be continued*\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/8d7e7f9159c7d5c2ceb0a92bbf84f615/plot_mcustom_parser.py b/_downloads/8d7e7f9159c7d5c2ceb0a92bbf84f615/plot_mcustom_parser.py index 5d2da8de8..0da42a868 100644 --- a/_downloads/8d7e7f9159c7d5c2ceb0a92bbf84f615/plot_mcustom_parser.py +++ b/_downloads/8d7e7f9159c7d5c2ceb0a92bbf84f615/plot_mcustom_parser.py @@ -21,9 +21,6 @@ of using a parser. By default, a transformer only returns one output but both are needed. -.. contents:: - :local: - A new transformer +++++++++++++++++ """ diff --git a/_downloads/8e8cf26a019bd3a2954fe2ccb430d818/plot_dbegin_options.ipynb b/_downloads/8e8cf26a019bd3a2954fe2ccb430d818/plot_dbegin_options.ipynb index 5546af285..612c104fc 100644 --- a/_downloads/8e8cf26a019bd3a2954fe2ccb430d818/plot_dbegin_options.ipynb +++ b/_downloads/8e8cf26a019bd3a2954fe2ccb430d818/plot_dbegin_options.ipynb @@ -1,331 +1,331 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# One model, many possible conversions with options\n\n.. index:: options\n\nThere is not one way to convert a model. A new operator\nmight have been added in a newer version of :epkg:`ONNX`\nand that speeds up the converted model. The rational choice\nwould be to use this new operator but what means the associated\nruntime has an implementation for it. 
What if two different\nusers needs two different conversion for the same model?\nLet's see how this may be done.\n\n\n## Option *zipmap*\n\nEvery classifier is by design converted into an ONNX graph which outputs\ntwo results: the predicted label and the prediction probabilites\nfor every label. By default, the labels are integers and the\nprobabilites are stored in dictionaries. That's the purpose\nof operator *ZipMap* added at the end of the following graph.\n\n.. gdot::\n :script: DOT-SECTION\n\n import numpy\n from sklearn.datasets import load_iris\n from sklearn.model_selection import train_test_split\n from sklearn.linear_model import LogisticRegression\n from skl2onnx import to_onnx\n from mlprodict.onnxrt import OnnxInference\n\n iris = load_iris()\n X, y = iris.data, iris.target\n X_train, _, y_train, __ = train_test_split(X, y, random_state=11)\n clr = LogisticRegression()\n clr.fit(X_train, y_train)\n\n model_def = to_onnx(clr, X_train.astype(numpy.float32))\n oinf = OnnxInference(model_def)\n print(\"DOT-SECTION\", oinf.to_dot())\n\nThis operator is not really efficient as it copies every probabilies and\nlabels in a different container. This time is usually significant for\nsmall classifiers. Then it makes sense to remove it.\n\n.. gdot::\n :script: DOT-SECTION\n\n import numpy\n from sklearn.datasets import load_iris\n from sklearn.model_selection import train_test_split\n from sklearn.linear_model import LogisticRegression\n from skl2onnx import to_onnx\n from mlprodict.onnxrt import OnnxInference\n\n iris = load_iris()\n X, y = iris.data, iris.target\n X_train, _, y_train, __ = train_test_split(X, y, random_state=11)\n clr = LogisticRegression()\n clr.fit(X_train, y_train)\n\n model_def = to_onnx(clr, X_train.astype(numpy.float32),\n options={LogisticRegression: {'zipmap': False}})\n oinf = OnnxInference(model_def)\n print(\"DOT-SECTION\", oinf.to_dot())\n\nThere might be in the graph many classifiers, it is important to have\na way to specify which classifier should keep its *ZipMap*\nand which is not. 
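Concretely, the option only changes the container of the probabilities, not their values. A small sketch (assuming :epkg:`onnxruntime` is installed) makes the difference visible:

import numpy
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from skl2onnx import to_onnx
from onnxruntime import InferenceSession

X, y = load_iris(return_X_y=True)
clr = LogisticRegression(max_iter=500).fit(X, y)
X32 = X[:2].astype(numpy.float32)

with_zipmap = to_onnx(clr, X32)                                  # default: ZipMap kept
without_zipmap = to_onnx(clr, X32, options={'zipmap': False})    # ZipMap removed

for onx in (with_zipmap, without_zipmap):
    sess = InferenceSession(onx.SerializeToString())
    label, proba = sess.run(None, {'X': X32})
    # list of dictionaries with ZipMap, plain numpy array without it
    print(type(proba), proba[0])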
So it is possible to specify options by id.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from pprint import pformat\nimport numpy\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom skl2onnx.common._registration import _converter_pool\nfrom skl2onnx import to_onnx\nfrom onnxruntime import InferenceSession\nfrom mlprodict.onnxrt import OnnxInference\n\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, _ = train_test_split(X, y, random_state=11)\nclr = LogisticRegression()\nclr.fit(X_train, y_train)\n\nmodel_def = to_onnx(clr, X_train.astype(numpy.float32),\n options={id(clr): {'zipmap': False}})\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Visually.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We need to compare that kind of visualisation to\nwhat it would give with operator *ZipMap*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_def = to_onnx(clr, X_train.astype(numpy.float32))\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Visually.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Using function *id* has one flaw: it is not pickable.\nIt is just better to use strings.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_def = to_onnx(clr, X_train.astype(numpy.float32),\n options={'zipmap': False})\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Visually.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Option in a pipeline\n\nIn a pipeline, :epkg:`sklearn-onnx` uses the same\nname convention.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pipe = Pipeline([\n ('norm', MinMaxScaler()),\n ('clr', LogisticRegression())\n])\npipe.fit(X_train, y_train)\n\nmodel_def = to_onnx(pipe, X_train.astype(numpy.float32),\n options={'clr__zipmap': False})\noinf = 
OnnxInference(model_def, runtime='python_compiled')\nprint(oinf)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Visually.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Option *raw_scores*\n\nEvery classifier is converted in a graph which\nreturns probabilities by default. But many models\ncompute unscaled *raw_scores*.\nFirst, with probabilities:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pipe = Pipeline([\n ('norm', MinMaxScaler()),\n ('clr', LogisticRegression())\n])\npipe.fit(X_train, y_train)\n\nmodel_def = to_onnx(\n pipe, X_train.astype(numpy.float32),\n options={id(pipe): {'zipmap': False}})\n\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf.run({'X': X.astype(numpy.float32)[:5]}))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then with raw scores:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_def = to_onnx(\n pipe, X_train.astype(numpy.float32),\n options={id(pipe): {'raw_scores': True, 'zipmap': False}})\n\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf.run({'X': X.astype(numpy.float32)[:5]}))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It did not seem to work... We need to tell\nthat applies on a specific part of the pipeline\nand not the whole pipeline.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_def = to_onnx(\n pipe, X_train.astype(numpy.float32),\n options={id(pipe.steps[1][1]): {'raw_scores': True, 'zipmap': False}})\n\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf.run({'X': X.astype(numpy.float32)[:5]}))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There are negative values. That works.\nStrings are still easier to use.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_def = to_onnx(\n pipe, X_train.astype(numpy.float32),\n options={'clr__raw_scores': True, 'clr__zipmap': False})\n\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf.run({'X': X.astype(numpy.float32)[:5]}))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Negative figures. We still have raw scores.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Option *decision_path*\n\n*scikit-learn* implements a function to retrieve the\ndecision path. 
It can be enabled by option *decision_path*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "clrrf = RandomForestClassifier(n_estimators=2, max_depth=2)\nclrrf.fit(X_train, y_train)\nclrrf.predict(X_test[:2])\npaths, n_nodes_ptr = clrrf.decision_path(X_test[:2])\nprint(paths.todense())\n\nmodel_def = to_onnx(clrrf, X_train.astype(numpy.float32),\n options={id(clrrf): {'decision_path': True,\n 'zipmap': False}})\nsess = InferenceSession(model_def.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The model produces 3 outputs.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print([o.name for o in sess.get_outputs()])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's display the last one.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "res = sess.run(None, {'X': X_test[:2].astype(numpy.float32)})\nprint(res[-1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## List of available options\n\nOptions are registered for every converted to detect any\nsupported options while running the conversion.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "all_opts = set()\nfor k, v in sorted(_converter_pool.items()):\n opts = v.get_allowed_options()\n if not isinstance(opts, dict):\n continue\n name = k.replace('Sklearn', '')\n print('%s%s %r' % (name, \" \" * (30 - len(name)), opts))\n for o in opts:\n all_opts.add(o)\n\nprint('all options:', pformat(list(sorted(all_opts))))" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# One model, many possible conversions with options\n\n.. index:: options\n\nThere is not one way to convert a model. A new operator\nmight have been added in a newer version of :epkg:`ONNX`\nand that speeds up the converted model. The rational choice\nwould be to use this new operator but what means the associated\nruntime has an implementation for it. What if two different\nusers needs two different conversion for the same model?\nLet's see how this may be done.\n\n## Option *zipmap*\n\nEvery classifier is by design converted into an ONNX graph which outputs\ntwo results: the predicted label and the prediction probabilites\nfor every label. By default, the labels are integers and the\nprobabilites are stored in dictionaries. That's the purpose\nof operator *ZipMap* added at the end of the following graph.\n\n.. 
gdot::\n :script: DOT-SECTION\n\n import numpy\n from sklearn.datasets import load_iris\n from sklearn.model_selection import train_test_split\n from sklearn.linear_model import LogisticRegression\n from skl2onnx import to_onnx\n from mlprodict.onnxrt import OnnxInference\n\n iris = load_iris()\n X, y = iris.data, iris.target\n X_train, _, y_train, __ = train_test_split(X, y, random_state=11)\n clr = LogisticRegression()\n clr.fit(X_train, y_train)\n\n model_def = to_onnx(clr, X_train.astype(numpy.float32))\n oinf = OnnxInference(model_def)\n print(\"DOT-SECTION\", oinf.to_dot())\n\nThis operator is not really efficient as it copies every probabilies and\nlabels in a different container. This time is usually significant for\nsmall classifiers. Then it makes sense to remove it.\n\n.. gdot::\n :script: DOT-SECTION\n\n import numpy\n from sklearn.datasets import load_iris\n from sklearn.model_selection import train_test_split\n from sklearn.linear_model import LogisticRegression\n from skl2onnx import to_onnx\n from mlprodict.onnxrt import OnnxInference\n\n iris = load_iris()\n X, y = iris.data, iris.target\n X_train, _, y_train, __ = train_test_split(X, y, random_state=11)\n clr = LogisticRegression()\n clr.fit(X_train, y_train)\n\n model_def = to_onnx(clr, X_train.astype(numpy.float32),\n options={LogisticRegression: {'zipmap': False}})\n oinf = OnnxInference(model_def)\n print(\"DOT-SECTION\", oinf.to_dot())\n\nThere might be in the graph many classifiers, it is important to have\na way to specify which classifier should keep its *ZipMap*\nand which is not. So it is possible to specify options by id.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from pprint import pformat\nimport numpy\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom skl2onnx.common._registration import _converter_pool\nfrom skl2onnx import to_onnx\nfrom onnxruntime import InferenceSession\nfrom mlprodict.onnxrt import OnnxInference\n\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, _ = train_test_split(X, y, random_state=11)\nclr = LogisticRegression()\nclr.fit(X_train, y_train)\n\nmodel_def = to_onnx(clr, X_train.astype(numpy.float32),\n options={id(clr): {'zipmap': False}})\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Visually.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We need to compare that kind of visualisation to\nwhat it would give with operator *ZipMap*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_def = to_onnx(clr, X_train.astype(numpy.float32))\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + 
"Visually.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Using function *id* has one flaw: it is not pickable.\nIt is just better to use strings.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_def = to_onnx(clr, X_train.astype(numpy.float32),\n options={'zipmap': False})\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Visually.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Option in a pipeline\n\nIn a pipeline, :epkg:`sklearn-onnx` uses the same\nname convention.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pipe = Pipeline([\n ('norm', MinMaxScaler()),\n ('clr', LogisticRegression())\n])\npipe.fit(X_train, y_train)\n\nmodel_def = to_onnx(pipe, X_train.astype(numpy.float32),\n options={'clr__zipmap': False})\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Visually.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Option *raw_scores*\n\nEvery classifier is converted in a graph which\nreturns probabilities by default. But many models\ncompute unscaled *raw_scores*.\nFirst, with probabilities:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pipe = Pipeline([\n ('norm', MinMaxScaler()),\n ('clr', LogisticRegression())\n])\npipe.fit(X_train, y_train)\n\nmodel_def = to_onnx(\n pipe, X_train.astype(numpy.float32),\n options={id(pipe): {'zipmap': False}})\n\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf.run({'X': X.astype(numpy.float32)[:5]}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then with raw scores:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_def = to_onnx(\n pipe, X_train.astype(numpy.float32),\n options={id(pipe): {'raw_scores': True, 'zipmap': False}})\n\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf.run({'X': X.astype(numpy.float32)[:5]}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It did not seem to work... 
We need to tell\nthat applies on a specific part of the pipeline\nand not the whole pipeline.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_def = to_onnx(\n pipe, X_train.astype(numpy.float32),\n options={id(pipe.steps[1][1]): {'raw_scores': True, 'zipmap': False}})\n\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf.run({'X': X.astype(numpy.float32)[:5]}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There are negative values. That works.\nStrings are still easier to use.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_def = to_onnx(\n pipe, X_train.astype(numpy.float32),\n options={'clr__raw_scores': True, 'clr__zipmap': False})\n\noinf = OnnxInference(model_def, runtime='python_compiled')\nprint(oinf.run({'X': X.astype(numpy.float32)[:5]}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Negative figures. We still have raw scores.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Option *decision_path*\n\n*scikit-learn* implements a function to retrieve the\ndecision path. It can be enabled by option *decision_path*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "clrrf = RandomForestClassifier(n_estimators=2, max_depth=2)\nclrrf.fit(X_train, y_train)\nclrrf.predict(X_test[:2])\npaths, n_nodes_ptr = clrrf.decision_path(X_test[:2])\nprint(paths.todense())\n\nmodel_def = to_onnx(clrrf, X_train.astype(numpy.float32),\n options={id(clrrf): {'decision_path': True,\n 'zipmap': False}})\nsess = InferenceSession(model_def.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The model produces 3 outputs.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print([o.name for o in sess.get_outputs()])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's display the last one.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "res = sess.run(None, {'X': X_test[:2].astype(numpy.float32)})\nprint(res[-1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## List of available options\n\nOptions are registered for every converted to detect any\nsupported options while running the conversion.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "all_opts = set()\nfor k, v in sorted(_converter_pool.items()):\n opts = v.get_allowed_options()\n if not isinstance(opts, dict):\n continue\n name = k.replace('Sklearn', '')\n print('%s%s %r' % (name, \" \" * (30 - len(name)), opts))\n for o in opts:\n all_opts.add(o)\n\nprint('all options:', pformat(list(sorted(all_opts))))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": 
"3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/940d20a5bdb46eb51fb878fbb3bd928d/plot_metadata.ipynb b/_downloads/940d20a5bdb46eb51fb878fbb3bd928d/plot_metadata.ipynb index 100ee0985..8013b083b 100644 --- a/_downloads/940d20a5bdb46eb51fb878fbb3bd928d/plot_metadata.ipynb +++ b/_downloads/940d20a5bdb46eb51fb878fbb3bd928d/plot_metadata.ipynb @@ -1,90 +1,90 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Metadata\n\n.. index:: metadata\n\nONNX format contains metadata related to how the\nmodel was produced. It is useful when the model\nis deployed to production to keep track of which\ninstance was used at a specific time.\nLet's see how to do that with a simple\nlogistic regression model trained with\n*scikit-learn*.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import skl2onnx\nimport onnxruntime\nimport sklearn\nimport numpy\nfrom onnxruntime import InferenceSession\nimport onnx\nfrom onnxruntime.datasets import get_example\n\nexample = get_example(\"logreg_iris.onnx\")\n\nmodel = onnx.load(example)\n\nprint(\"doc_string={}\".format(model.doc_string))\nprint(\"domain={}\".format(model.domain))\nprint(\"ir_version={}\".format(model.ir_version))\nprint(\"metadata_props={}\".format(model.metadata_props))\nprint(\"model_version={}\".format(model.model_version))\nprint(\"producer_name={}\".format(model.producer_name))\nprint(\"producer_version={}\".format(model.producer_version))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With *ONNX Runtime*:\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = InferenceSession(example)\nmeta = sess.get_modelmeta()\n\nprint(\"custom_metadata_map={}\".format(meta.custom_metadata_map))\nprint(\"description={}\".format(meta.description))\nprint(\"domain={}\".format(meta.domain))\nprint(\"graph_name={}\".format(meta.graph_name))\nprint(\"producer_name={}\".format(meta.producer_name))\nprint(\"version={}\".format(meta.version))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Metadata\n\n.. 
index:: metadata\n\nONNX format contains metadata related to how the\nmodel was produced. It is useful when the model\nis deployed to production to keep track of which\ninstance was used at a specific time.\nLet's see how to do that with a simple\nlogistic regression model trained with\n*scikit-learn*.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import skl2onnx\nimport onnxruntime\nimport sklearn\nimport numpy\nfrom onnxruntime import InferenceSession\nimport onnx\nfrom onnxruntime.datasets import get_example\n\nexample = get_example(\"logreg_iris.onnx\")\n\nmodel = onnx.load(example)\n\nprint(\"doc_string={}\".format(model.doc_string))\nprint(\"domain={}\".format(model.domain))\nprint(\"ir_version={}\".format(model.ir_version))\nprint(\"metadata_props={}\".format(model.metadata_props))\nprint(\"model_version={}\".format(model.model_version))\nprint(\"producer_name={}\".format(model.producer_name))\nprint(\"producer_version={}\".format(model.producer_version))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With *ONNX Runtime*:\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = InferenceSession(example)\nmeta = sess.get_modelmeta()\n\nprint(\"custom_metadata_map={}\".format(meta.custom_metadata_map))\nprint(\"description={}\".format(meta.description))\nprint(\"domain={}\".format(meta.domain))\nprint(\"graph_name={}\".format(meta.graph_name))\nprint(\"producer_name={}\".format(meta.producer_name))\nprint(\"version={}\".format(meta.version))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/969179b1cb934adb05c3f3b77a0cfd36/plot_woe_transformer.ipynb b/_downloads/969179b1cb934adb05c3f3b77a0cfd36/plot_woe_transformer.ipynb index 7ad307a76..1a459fbc6 100644 --- a/_downloads/969179b1cb934adb05c3f3b77a0cfd36/plot_woe_transformer.ipynb +++ b/_downloads/969179b1cb934adb05c3f3b77a0cfd36/plot_woe_transformer.ipynb @@ -1,198 +1,198 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Converter for WOE\n\nWOE means Weights of Evidence. It consists in checking that\na feature X belongs to a series of regions - intervals -.\nThe results is the label of every intervals containing the feature.\n\n.. index:: WOE, WOETransformer\n\n## A simple example\n\nX is a vector made of the first ten integers. 
Class\n:class:`WOETransformer `\nchecks that every of them belongs to two intervals,\n`]1, 3[` (leftright-opened) and `[5, 7]`\n(left-right-closed). The first interval is associated\nto weight 55 and and the second one to 107.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import os\nimport numpy as np\nimport pandas as pd\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nfrom onnxruntime import InferenceSession\nimport matplotlib.pyplot as plt\nfrom skl2onnx import to_onnx\nfrom skl2onnx.sklapi import WOETransformer\n# automatically registers the converter for WOETransformer\nimport skl2onnx.sklapi.register # noqa\n\nX = np.arange(10).astype(np.float32).reshape((-1, 1))\n\nintervals = [\n [(1., 3., False, False),\n (5., 7., True, True)]]\nweights = [[55, 107]]\n\nwoe1 = WOETransformer(intervals, onehot=False, weights=weights)\nwoe1.fit(X)\nprd = woe1.transform(X)\ndf = pd.DataFrame({'X': X.ravel(), 'woe': prd.ravel()})\ndf" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## One Hot\n\nThe transformer outputs one column with the weights.\nBut it could return one column per interval.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "woe2 = WOETransformer(intervals, onehot=True, weights=weights)\nwoe2.fit(X)\nprd = woe2.transform(X)\ndf = pd.DataFrame(prd)\ndf.columns = ['I1', 'I2']\ndf['X'] = X\ndf" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In that case, weights can be omitted.\nThe output is binary.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "woe = WOETransformer(intervals, onehot=True)\nwoe.fit(X)\nprd = woe.transform(X)\ndf = pd.DataFrame(prd)\ndf.columns = ['I1', 'I2']\ndf['X'] = X\ndf" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion to ONNX\n\n*skl2onnx* implements a converter for all cases.\n\nonehot=False\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx1 = to_onnx(woe1, X)\nsess = InferenceSession(onx1.SerializeToString())\nprint(sess.run(None, {'X': X})[0])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "onehot=True\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx2 = to_onnx(woe2, X)\nsess = InferenceSession(onx2.SerializeToString())\nprint(sess.run(None, {'X': X})[0])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ONNX Graphs\n\nonehot=False\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n onx1.graph, name=onx1.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"woe1.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng woe1.dot')\n\nimage = plt.imread(\"woe1.dot.png\")\nfig, ax = plt.subplots(figsize=(10, 10))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "onehot=True\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n onx2.graph, name=onx2.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"woe2.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng woe2.dot')\n\nimage = plt.imread(\"woe2.dot.png\")\nfig, ax = plt.subplots(figsize=(10, 10))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Half-line\n\nAn interval may have only one extremity defined and the other\ncan be infinite.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "intervals = [\n [(-np.inf, 3., True, True),\n (5., np.inf, True, True)]]\nweights = [[55, 107]]\n\nwoe1 = WOETransformer(intervals, onehot=False, weights=weights)\nwoe1.fit(X)\nprd = woe1.transform(X)\ndf = pd.DataFrame({'X': X.ravel(), 'woe': prd.ravel()})\ndf" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And the conversion to ONNX using the same instruction.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onxinf = to_onnx(woe1, X)\nsess = InferenceSession(onxinf.SerializeToString())\nprint(sess.run(None, {'X': X})[0])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Converter for WOE\n\nWOE means Weights of Evidence. It consists in checking that\na feature X belongs to a series of regions - intervals -.\nThe results is the label of every intervals containing the feature.\n\n.. index:: WOE, WOETransformer\n\n## A simple example\n\nX is a vector made of the first ten integers. Class\n:class:`WOETransformer `\nchecks that every of them belongs to two intervals,\n`]1, 3[` (leftright-opened) and `[5, 7]`\n(left-right-closed). 
The first interval is associated\nto weight 55 and and the second one to 107.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import os\nimport numpy as np\nimport pandas as pd\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nfrom onnxruntime import InferenceSession\nimport matplotlib.pyplot as plt\nfrom skl2onnx import to_onnx\nfrom skl2onnx.sklapi import WOETransformer\n# automatically registers the converter for WOETransformer\nimport skl2onnx.sklapi.register # noqa\n\nX = np.arange(10).astype(np.float32).reshape((-1, 1))\n\nintervals = [\n [(1., 3., False, False),\n (5., 7., True, True)]]\nweights = [[55, 107]]\n\nwoe1 = WOETransformer(intervals, onehot=False, weights=weights)\nwoe1.fit(X)\nprd = woe1.transform(X)\ndf = pd.DataFrame({'X': X.ravel(), 'woe': prd.ravel()})\ndf" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## One Hot\n\nThe transformer outputs one column with the weights.\nBut it could return one column per interval.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "woe2 = WOETransformer(intervals, onehot=True, weights=weights)\nwoe2.fit(X)\nprd = woe2.transform(X)\ndf = pd.DataFrame(prd)\ndf.columns = ['I1', 'I2']\ndf['X'] = X\ndf" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In that case, weights can be omitted.\nThe output is binary.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "woe = WOETransformer(intervals, onehot=True)\nwoe.fit(X)\nprd = woe.transform(X)\ndf = pd.DataFrame(prd)\ndf.columns = ['I1', 'I2']\ndf['X'] = X\ndf" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion to ONNX\n\n*skl2onnx* implements a converter for all cases.\n\nonehot=False\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx1 = to_onnx(woe1, X)\nsess = InferenceSession(onx1.SerializeToString())\nprint(sess.run(None, {'X': X})[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "onehot=True\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx2 = to_onnx(woe2, X)\nsess = InferenceSession(onx2.SerializeToString())\nprint(sess.run(None, {'X': X})[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ONNX Graphs\n\nonehot=False\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n onx1.graph, name=onx1.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"woe1.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng woe1.dot')\n\nimage = plt.imread(\"woe1.dot.png\")\nfig, ax = plt.subplots(figsize=(10, 10))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "onehot=True\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n onx2.graph, name=onx2.graph.name, 
rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"woe2.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng woe2.dot')\n\nimage = plt.imread(\"woe2.dot.png\")\nfig, ax = plt.subplots(figsize=(10, 10))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Half-line\n\nAn interval may have only one extremity defined and the other\ncan be infinite.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "intervals = [\n [(-np.inf, 3., True, True),\n (5., np.inf, True, True)]]\nweights = [[55, 107]]\n\nwoe1 = WOETransformer(intervals, onehot=False, weights=weights)\nwoe1.fit(X)\nprd = woe1.transform(X)\ndf = pd.DataFrame({'X': X.ravel(), 'woe': prd.ravel()})\ndf" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And the conversion to ONNX using the same instruction.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onxinf = to_onnx(woe1, X)\nsess = InferenceSession(onxinf.SerializeToString())\nprint(sess.run(None, {'X': X})[0])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/9780baa920342800e95b07198173bc66/plot_usparse_xgboost.ipynb b/_downloads/9780baa920342800e95b07198173bc66/plot_usparse_xgboost.ipynb index 7fc624e07..48d893a08 100644 --- a/_downloads/9780baa920342800e95b07198173bc66/plot_usparse_xgboost.ipynb +++ b/_downloads/9780baa920342800e95b07198173bc66/plot_usparse_xgboost.ipynb @@ -1,169 +1,169 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# TfIdf and sparse matrices\n\n.. index:: xgboost, lightgbm, sparse, ensemble\n\n`TfidfVectorizer `_\nusually creates sparse data. If the data is sparse enough, matrices\nusually stays as sparse all along the pipeline until the predictor\nis trained. Sparse matrices do not consider null and missing values\nas they are not present in the datasets. Because some predictors\ndo the difference, this ambiguity may introduces discrepencies\nwhen converter into ONNX. This example looks into several configurations.\n\n## Imports, setups\n\nAll imports. 
It also registered onnx converters for :epgk:`xgboost`\nand *lightgbm*.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import warnings\nimport numpy\nimport pandas\nimport onnxruntime as rt\nfrom tqdm import tqdm\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.ensemble import RandomForestClassifier\ntry:\n from sklearn.ensemble import HistGradientBoostingClassifier\nexcept ImportError:\n HistGradientBoostingClassifier = None\nfrom xgboost import XGBClassifier\nfrom lightgbm import LGBMClassifier\nfrom skl2onnx.common.data_types import FloatTensorType, StringTensorType\nfrom skl2onnx import to_onnx, update_registered_converter\nfrom skl2onnx.sklapi import CastTransformer, ReplaceTransformer\nfrom skl2onnx.common.shape_calculator import (\n calculate_linear_classifier_output_shapes)\nfrom onnxmltools.convert.xgboost.operator_converters.XGBoost import (\n convert_xgboost)\nfrom onnxmltools.convert.lightgbm.operator_converters.LightGbm import (\n convert_lightgbm)\n\n\nupdate_registered_converter(\n XGBClassifier, 'XGBoostXGBClassifier',\n calculate_linear_classifier_output_shapes, convert_xgboost,\n options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})\nupdate_registered_converter(\n LGBMClassifier, 'LightGbmLGBMClassifier',\n calculate_linear_classifier_output_shapes, convert_lightgbm,\n options={'nocl': [True, False], 'zipmap': [True, False]})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Artificial datasets\n\nIris + a text column.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "cst = ['class zero', 'class one', 'class two']\n\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\n\ndf = pandas.DataFrame(X)\ndf[\"text\"] = [cst[i] for i in y]\n\n\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Train ensemble after sparse\n\nThe example use the Iris datasets with artifical text datasets\npreprocessed with a tf-idf. 
`sparse_threshold=1.` avoids\nsparse matrices to be converted into dense matrices.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def make_pipelines(df_train, y_train, models=None,\n sparse_threshold=1., replace_nan=False,\n insert_replace=False):\n\n if models is None:\n models = [\n RandomForestClassifier, HistGradientBoostingClassifier,\n XGBClassifier, LGBMClassifier]\n models = [_ for _ in models if _ is not None]\n\n pipes = []\n for model in tqdm(models):\n\n if model == HistGradientBoostingClassifier:\n kwargs = dict(max_iter=5)\n elif model == XGBClassifier:\n kwargs = dict(n_estimators=5, use_label_encoder=False)\n else:\n kwargs = dict(n_estimators=5)\n\n if insert_replace:\n pipe = Pipeline([\n ('union', ColumnTransformer([\n ('scale1', StandardScaler(), [0, 1]),\n ('subject',\n Pipeline([\n ('count', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('repl', ReplaceTransformer()),\n ]), \"text\"),\n ], sparse_threshold=sparse_threshold)),\n ('cast', CastTransformer()),\n ('cls', model(max_depth=3, **kwargs)),\n ])\n else:\n pipe = Pipeline([\n ('union', ColumnTransformer([\n ('scale1', StandardScaler(), [0, 1]),\n ('subject',\n Pipeline([\n ('count', CountVectorizer()),\n ('tfidf', TfidfTransformer())\n ]), \"text\"),\n ], sparse_threshold=sparse_threshold)),\n ('cast', CastTransformer()),\n ('cls', model(max_depth=3, **kwargs)),\n ])\n\n try:\n pipe.fit(df_train, y_train)\n except TypeError as e:\n obs = dict(model=model.__name__, pipe=pipe, error=e)\n pipes.append(obs)\n continue\n\n options = {model: {'zipmap': False}}\n if replace_nan:\n options[TfidfTransformer] = {'nan': True}\n\n # convert\n with warnings.catch_warnings(record=False):\n warnings.simplefilter(\"ignore\", (FutureWarning, UserWarning))\n model_onnx = to_onnx(\n pipe,\n initial_types=[('input', FloatTensorType([None, 2])),\n ('text', StringTensorType([None, 1]))],\n target_opset={'': 12, 'ai.onnx.ml': 2},\n options=options)\n\n with open('model.onnx', 'wb') as f:\n f.write(model_onnx.SerializeToString())\n\n sess = rt.InferenceSession(model_onnx.SerializeToString())\n inputs = {\"input\": df[[0, 1]].values.astype(numpy.float32),\n \"text\": df[[\"text\"]].values}\n pred_onx = sess.run(None, inputs)\n\n diff = numpy.abs(\n pred_onx[1].ravel() -\n pipe.predict_proba(df).ravel()).sum()\n\n obs = dict(model=model.__name__,\n discrepencies=diff,\n model_onnx=model_onnx, pipe=pipe)\n pipes.append(obs)\n\n return pipes\n\n\ndata_sparse = make_pipelines(df, y)\nstat = pandas.DataFrame(data_sparse).drop(['model_onnx', 'pipe'], axis=1)\nif 'error' in stat.columns:\n print(stat.drop('error', axis=1))\nstat" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Sparse data hurts.\n\n## Dense data\n\nLet's replace sparse data with dense by using `sparse_threshold=0.`\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "data_dense = make_pipelines(df, y, sparse_threshold=0.)\nstat = pandas.DataFrame(data_dense).drop(['model_onnx', 'pipe'], axis=1)\nif 'error' in stat.columns:\n print(stat.drop('error', axis=1))\nstat" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This is much better. 
Let's compare how the preprocessing\napplies on the data.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"sparse\")\nprint(data_sparse[-1]['pipe'].steps[0][-1].transform(df)[:2])\nprint()\nprint(\"dense\")\nprint(data_dense[-1]['pipe'].steps[0][-1].transform(df)[:2])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This shows `RandomForestClassifier\n`_,\n`XGBClassifier `_ do not process\nthe same way sparse and\ndense matrix as opposed to `LGBMClassifier\n`_.\nAnd `HistGradientBoostingClassifier\n`_\nfails.\n\n## Dense data with nan\n\nLet's keep sparse data in the scikit-learn pipeline but\nreplace null values by nan in the onnx graph.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "data_dense = make_pipelines(df, y, sparse_threshold=1., replace_nan=True)\nstat = pandas.DataFrame(data_dense).drop(['model_onnx', 'pipe'], axis=1)\nif 'error' in stat.columns:\n print(stat.drop('error', axis=1))\nstat" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Dense, 0 replaced by nan\n\nInstead of using a specific options to replace null values\ninto nan values, a custom transformer called\nReplaceTransformer is explicitely inserted into the pipeline.\nA new converter is added to the list of supported models.\nIt is equivalent to the previous options except it is\nmore explicit.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "data_dense = make_pipelines(df, y, sparse_threshold=1., replace_nan=False,\n insert_replace=True)\nstat = pandas.DataFrame(data_dense).drop(['model_onnx', 'pipe'], axis=1)\nif 'error' in stat.columns:\n print(stat.drop('error', axis=1))\nstat" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conclusion\n\nUnless dense arrays are used, because *onnxruntime*\nONNX does not support sparse yet, the conversion needs to be\ntuned depending on the model which follows the TfIdf preprocessing.\n\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# TfIdf and sparse matrices\n\n.. index:: xgboost, lightgbm, sparse, ensemble\n\n[TfidfVectorizer](https://scikit-learn.org/stable/modules/\ngenerated/sklearn.feature_extraction.text.TfidfVectorizer.html)\nusually creates sparse data. If the data is sparse enough, matrices\nusually stays as sparse all along the pipeline until the predictor\nis trained. Sparse matrices do not consider null and missing values\nas they are not present in the datasets. Because some predictors\ndo the difference, this ambiguity may introduces discrepencies\nwhen converter into ONNX. This example looks into several configurations.\n\n## Imports, setups\n\nAll imports. 
It also registers the ONNX converters for :epkg:`xgboost`\nand *lightgbm*.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import warnings\nimport numpy\nimport pandas\nimport onnxruntime as rt\nfrom tqdm import tqdm\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.ensemble import RandomForestClassifier\ntry:\n from sklearn.ensemble import HistGradientBoostingClassifier\nexcept ImportError:\n HistGradientBoostingClassifier = None\nfrom xgboost import XGBClassifier\nfrom lightgbm import LGBMClassifier\nfrom skl2onnx.common.data_types import FloatTensorType, StringTensorType\nfrom skl2onnx import to_onnx, update_registered_converter\nfrom skl2onnx.sklapi import CastTransformer, ReplaceTransformer\nfrom skl2onnx.common.shape_calculator import (\n calculate_linear_classifier_output_shapes)\nfrom onnxmltools.convert.xgboost.operator_converters.XGBoost import (\n convert_xgboost)\nfrom onnxmltools.convert.lightgbm.operator_converters.LightGbm import (\n convert_lightgbm)\n\n\nupdate_registered_converter(\n XGBClassifier, 'XGBoostXGBClassifier',\n calculate_linear_classifier_output_shapes, convert_xgboost,\n options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})\nupdate_registered_converter(\n LGBMClassifier, 'LightGbmLGBMClassifier',\n calculate_linear_classifier_output_shapes, convert_lightgbm,\n options={'nocl': [True, False], 'zipmap': [True, False]})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Artificial datasets\n\nIris + a text column.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "cst = ['class zero', 'class one', 'class two']\n\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\n\ndf = pandas.DataFrame(X)\ndf.columns = [f\"c{c}\" for c in df.columns]\ndf[\"text\"] = [cst[i] for i in y]\n\n\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Train ensemble after sparse\n\nThe example uses the Iris dataset with an artificial text column\npreprocessed with a tf-idf. 
`sparse_threshold=1.` avoids\nsparse matrices to be converted into dense matrices.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def make_pipelines(df_train, y_train, models=None,\n sparse_threshold=1., replace_nan=False,\n insert_replace=False):\n\n if models is None:\n models = [\n RandomForestClassifier, HistGradientBoostingClassifier,\n XGBClassifier, LGBMClassifier]\n models = [_ for _ in models if _ is not None]\n\n pipes = []\n for model in tqdm(models):\n\n if model == HistGradientBoostingClassifier:\n kwargs = dict(max_iter=5)\n elif model == XGBClassifier:\n kwargs = dict(n_estimators=5, use_label_encoder=False)\n else:\n kwargs = dict(n_estimators=5)\n\n if insert_replace:\n pipe = Pipeline([\n ('union', ColumnTransformer([\n ('scale1', StandardScaler(), [0, 1]),\n ('subject',\n Pipeline([\n ('count', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('repl', ReplaceTransformer()),\n ]), \"text\"),\n ], sparse_threshold=sparse_threshold)),\n ('cast', CastTransformer()),\n ('cls', model(max_depth=3, **kwargs)),\n ])\n else:\n pipe = Pipeline([\n ('union', ColumnTransformer([\n ('scale1', StandardScaler(), [0, 1]),\n ('subject',\n Pipeline([\n ('count', CountVectorizer()),\n ('tfidf', TfidfTransformer())\n ]), \"text\"),\n ], sparse_threshold=sparse_threshold)),\n ('cast', CastTransformer()),\n ('cls', model(max_depth=3, **kwargs)),\n ])\n\n try:\n pipe.fit(df_train, y_train)\n except TypeError as e:\n obs = dict(model=model.__name__, pipe=pipe, error=e,\n model_onnx=None)\n pipes.append(obs)\n continue\n\n options = {model: {'zipmap': False}}\n if replace_nan:\n options[TfidfTransformer] = {'nan': True}\n\n # convert\n with warnings.catch_warnings(record=False):\n warnings.simplefilter(\"ignore\", (FutureWarning, UserWarning))\n model_onnx = to_onnx(\n pipe,\n initial_types=[('input', FloatTensorType([None, 2])),\n ('text', StringTensorType([None, 1]))],\n target_opset={'': 12, 'ai.onnx.ml': 2},\n options=options)\n\n with open('model.onnx', 'wb') as f:\n f.write(model_onnx.SerializeToString())\n\n sess = rt.InferenceSession(model_onnx.SerializeToString())\n inputs = {\"input\": df[[\"c0\", \"c1\"]].values.astype(numpy.float32),\n \"text\": df[[\"text\"]].values}\n pred_onx = sess.run(None, inputs)\n\n diff = numpy.abs(\n pred_onx[1].ravel() -\n pipe.predict_proba(df).ravel()).sum()\n\n obs = dict(model=model.__name__,\n discrepencies=diff,\n model_onnx=model_onnx, pipe=pipe)\n pipes.append(obs)\n\n return pipes\n\n\ndata_sparse = make_pipelines(df, y)\nstat = pandas.DataFrame(data_sparse).drop(['model_onnx', 'pipe'], axis=1)\nif 'error' in stat.columns:\n print(stat.drop('error', axis=1))\nstat" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Sparse data hurts.\n\n## Dense data\n\nLet's replace sparse data with dense by using `sparse_threshold=0.`\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "data_dense = make_pipelines(df, y, sparse_threshold=0.)\nstat = pandas.DataFrame(data_dense).drop(['model_onnx', 'pipe'], axis=1)\nif 'error' in stat.columns:\n print(stat.drop('error', axis=1))\nstat" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is much better. 
Let's compare how the preprocessing\napplies to the data.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"sparse\")\nprint(data_sparse[-1]['pipe'].steps[0][-1].transform(df)[:2])\nprint()\nprint(\"dense\")\nprint(data_dense[-1]['pipe'].steps[0][-1].transform(df)[:2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This shows that [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/\nsklearn.ensemble.RandomForestClassifier.html) and\n[XGBClassifier](https://xgboost.readthedocs.io/\nen/latest/python/python_api.html) do not process\nsparse and dense matrices the same way,\nas opposed to [LGBMClassifier](https://lightgbm.readthedocs.io/en/latest/\npythonapi/lightgbm.LGBMClassifier.html).\nAnd [HistGradientBoostingClassifier](https://scikit-learn.org/stable/modules/generated/\nsklearn.ensemble.HistGradientBoostingClassifier.html)\nfails.\n\n## Dense data with nan\n\nLet's keep sparse data in the scikit-learn pipeline but\nreplace null values with nan in the ONNX graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "data_dense = make_pipelines(df, y, sparse_threshold=1., replace_nan=True)\nstat = pandas.DataFrame(data_dense).drop(['model_onnx', 'pipe'], axis=1)\nif 'error' in stat.columns:\n print(stat.drop('error', axis=1))\nstat" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Dense, 0 replaced by nan\n\nInstead of using a specific option to replace null values\nwith nan values, a custom transformer called\nReplaceTransformer is explicitly inserted into the pipeline.\nA new converter is added to the list of supported models.\nIt is equivalent to the previous option except it is\nmore explicit.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "data_dense = make_pipelines(df, y, sparse_threshold=1., replace_nan=False,\n insert_replace=True)\nstat = pandas.DataFrame(data_dense).drop(['model_onnx', 'pipe'], axis=1)\nif 'error' in stat.columns:\n print(stat.drop('error', axis=1))\nstat" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion\n\nBecause *onnxruntime* does not support sparse matrices yet,\nthe conversion needs to be tuned depending on the model\nwhich follows the TfIdf preprocessing, unless dense arrays are used.\n\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/9b2ed93085f33a626b99450d1dc5ab62/plot_catwoe_transformer.py b/_downloads/9b2ed93085f33a626b99450d1dc5ab62/plot_catwoe_transformer.py index f811f7bfb..338cf5939 100644 --- a/_downloads/9b2ed93085f33a626b99450d1dc5ab62/plot_catwoe_transformer.py +++ b/_downloads/9b2ed93085f33a626b99450d1dc5ab62/plot_catwoe_transformer.py @@ -17,9 +17,6 @@ .. index:: WOE, WOEEncoder -.. 
contents:: - :local: - A simple example ++++++++++++++++ diff --git a/_downloads/9caff3b937d46b592fb95f2eb590c005/plot_ngrams.ipynb b/_downloads/9caff3b937d46b592fb95f2eb590c005/plot_ngrams.ipynb new file mode 100644 index 000000000..7fce5ced4 --- /dev/null +++ b/_downloads/9caff3b937d46b592fb95f2eb590c005/plot_ngrams.ipynb @@ -0,0 +1,162 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Tricky issue when converting CountVectorizer or TfidfVectorizer\n\nThis issue is described at [scikit-learn/issues/13733](https://github.com/scikit-learn/scikit-learn/issues/13733).\nIf a CountVectorizer or a TfidfVectorizer produces a token with a space,\nskl2onnx cannot know if it is a bi-gram or a unigram with a space.\n\n## A simple example impossible to convert\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import pprint\nimport numpy\nfrom numpy.testing import assert_almost_equal\nfrom onnxruntime import InferenceSession\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom skl2onnx import to_onnx\nfrom skl2onnx.sklapi import TraceableTfidfVectorizer\nimport skl2onnx.sklapi.register # noqa\n\ncorpus = numpy.array([\n \"This is the first document.\",\n \"This document is the second document.\",\n \"Is this the first document?\",\n \"\",\n]).reshape((4, ))\n\npattern = r\"\\b[a-z ]{1,10}\\b\"\nmod1 = TfidfVectorizer(ngram_range=(1, 2),\n token_pattern=pattern)\nmod1.fit(corpus)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Unigrams and bi-grams are placed into the following container\nwhich maps each of them to its column index.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pprint.pprint(mod1.vocabulary_)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Conversion.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n to_onnx(mod1, corpus)\nexcept RuntimeError as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## TraceableTfidfVectorizer\n\nClass :class:`TraceableTfidfVectorizer` is equivalent to\n:class:`sklearn.feature_extraction.text.TfidfVectorizer`\nbut stores the unigrams and bi-grams of the vocabulary as tuples\ninstead of concatenating every piece into a string.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "mod2 = TraceableTfidfVectorizer(\n ngram_range=(1, 2), token_pattern=pattern)\nmod2.fit(corpus)\n\npprint.pprint(mod2.vocabulary_)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's check it produces the same results.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "assert_almost_equal(mod1.transform(corpus).todense(),\n mod2.transform(corpus).todense())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Conversion. Line `import skl2onnx.sklapi.register`\nwas added to register the converters associated with these\nnew classes. 
By default, only converters for scikit-learn are\ndeclared.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(mod2, corpus)\nsess = InferenceSession(onx.SerializeToString())\ngot = sess.run(None, {'X': corpus})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's check if there are discrepancies...\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "assert_almost_equal(mod2.transform(corpus).todense(), got[0])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/_downloads/9e0b8d8227a02be93003ae4ea6b83dd5/plot_complex_pipeline.py b/_downloads/9e0b8d8227a02be93003ae4ea6b83dd5/plot_complex_pipeline.py index f0ccdc5e4..1e4c58a2c 100644 --- a/_downloads/9e0b8d8227a02be93003ae4ea6b83dd5/plot_complex_pipeline.py +++ b/_downloads/9e0b8d8227a02be93003ae4ea6b83dd5/plot_complex_pipeline.py @@ -16,9 +16,6 @@ :ref:`l-complex-pipeline`. -.. contents:: - :local: - Create and train a complex pipeline +++++++++++++++++++++++++++++++++++ @@ -199,7 +196,8 @@ def convert_dataframe_schema(df, drop=None): ################################ # We are ready to run *onnxruntime*. -sess = rt.InferenceSession("pipeline_titanic.onnx") +sess = rt.InferenceSession("pipeline_titanic.onnx", + providers=["CPUExecutionProvider"]) pred_onx = sess.run(None, inputs) print("predict", pred_onx[0][:5]) print("predict_proba", pred_onx[1][:2]) @@ -215,7 +213,8 @@ def convert_dataframe_schema(df, drop=None): with open("pipeline_titanic_nozipmap.onnx", "wb") as f: f.write(model_onnx.SerializeToString()) -sess = rt.InferenceSession("pipeline_titanic_nozipmap.onnx") +sess = rt.InferenceSession("pipeline_titanic_nozipmap.onnx", + providers=["CPUExecutionProvider"]) pred_onx = sess.run(None, inputs) print("predict", pred_onx[0][:5]) print("predict_proba", pred_onx[1][:2]) diff --git a/_downloads/a072425c637cf9d3f703fe766b50e29b/plot_convert_zipmap.py b/_downloads/a072425c637cf9d3f703fe766b50e29b/plot_convert_zipmap.py index 123d4e2e4..1b0eaa48e 100644 --- a/_downloads/a072425c637cf9d3f703fe766b50e29b/plot_convert_zipmap.py +++ b/_downloads/a072425c637cf9d3f703fe766b50e29b/plot_convert_zipmap.py @@ -15,9 +15,6 @@ always needed. Let's see how to deactivate this behaviour on the Iris example. -.. contents:: - :local: - Train a model and convert it ++++++++++++++++++++++++++++ diff --git a/_downloads/a0ac2ff955d6e25c89df19b4b15947c9/plot_bbegin_measure_time.py b/_downloads/a0ac2ff955d6e25c89df19b4b15947c9/plot_bbegin_measure_time.py index c9222bd85..d40b743f3 100644 --- a/_downloads/a0ac2ff955d6e25c89df19b4b15947c9/plot_bbegin_measure_time.py +++ b/_downloads/a0ac2ff955d6e25c89df19b4b15947c9/plot_bbegin_measure_time.py @@ -11,10 +11,6 @@ and compares the processing time required by each option to compute predictions. -.. 
contents:: - :local: - - Training a pipeline +++++++++++++++++++ """ diff --git a/_downloads/a1b4a2c3c0c8e694c551f23e5a69531e/plot_gexternal_catboost.ipynb b/_downloads/a1b4a2c3c0c8e694c551f23e5a69531e/plot_gexternal_catboost.ipynb new file mode 100644 index 000000000..062718e64 --- /dev/null +++ b/_downloads/a1b4a2c3c0c8e694c551f23e5a69531e/plot_gexternal_catboost.ipynb @@ -0,0 +1,144 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Convert a pipeline with a CatBoost classifier\n\n.. index:: CatBoost\n\n:epkg:`sklearn-onnx` only converts :epkg:`scikit-learn` models into *ONNX*\nbut many libraries implement :epkg:`scikit-learn` API so that their models\ncan be included in a :epkg:`scikit-learn` pipeline. This example considers\na pipeline including a :epkg:`CatBoost` model. :epkg:`sklearn-onnx` can convert\nthe whole pipeline as long as it knows the converter associated to\na *CatBoostClassifier*. Let's see how to do it.\n\n## Train a CatBoostClassifier\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nimport numpy\nfrom onnx.helper import get_attribute_value\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom mlprodict.onnxrt import OnnxInference\nimport onnxruntime as rt\nfrom skl2onnx import convert_sklearn, update_registered_converter\nfrom skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes # noqa\nfrom skl2onnx.common.data_types import FloatTensorType, Int64TensorType, guess_tensor_type\nfrom skl2onnx._parse import _apply_zipmap, _get_sklearn_operator_name\nfrom catboost import CatBoostClassifier\nfrom catboost.utils import convert_to_onnx_object\n\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\n\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()\n\npipe = Pipeline([('scaler', StandardScaler()),\n ('lgbm', CatBoostClassifier(n_estimators=3))])\npipe.fit(X, y)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Register the converter for CatBoostClassifier\n\nThe model has no converter implemented in sklearn-onnx.\nWe need to register the one coming from *CatBoost* itself.\nHowever, the converter does not follow sklearn-onnx design and\nneeds to be wrapped.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def skl2onnx_parser_castboost_classifier(scope, model, inputs,\n custom_parsers=None):\n options = scope.get_options(model, dict(zipmap=True))\n no_zipmap = isinstance(options['zipmap'], bool) and not options['zipmap']\n\n alias = _get_sklearn_operator_name(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n this_operator.inputs = inputs\n\n label_variable = scope.declare_local_variable('label', Int64TensorType())\n prob_dtype = guess_tensor_type(inputs[0].type)\n probability_tensor_variable = scope.declare_local_variable('probabilities', prob_dtype)\n this_operator.outputs.append(label_variable)\n this_operator.outputs.append(probability_tensor_variable)\n probability_tensor = this_operator.outputs\n\n if no_zipmap:\n 
return probability_tensor\n\n return _apply_zipmap(options['zipmap'], scope, model,\n inputs[0].type, probability_tensor)\n\n\ndef skl2onnx_convert_catboost(scope, operator, container):\n \"\"\"\n CatBoost returns an ONNX graph with a single node.\n This function adds it to the main graph.\n \"\"\"\n onx = convert_to_onnx_object(operator.raw_operator)\n opsets = {d.domain: d.version for d in onx.opset_import}\n if '' in opsets and opsets[''] >= container.target_opset:\n raise RuntimeError(\n \"CatBoost uses an opset more recent than the target one.\")\n if len(onx.graph.initializer) > 0 or len(onx.graph.sparse_initializer) > 0:\n raise NotImplementedError(\n \"CatBoost returns a model initializers. This option is not implemented yet.\")\n if (len(onx.graph.node) not in (1, 2) or not onx.graph.node[0].op_type.startswith(\"TreeEnsemble\") or\n (len(onx.graph.node) == 2 and onx.graph.node[1].op_type != \"ZipMap\")):\n types = \", \".join(map(lambda n: n.op_type, onx.graph.node))\n raise NotImplementedError(\n f\"CatBoost returns {len(onx.graph.node)} != 1 (types={types}). \"\n f\"This option is not implemented yet.\")\n node = onx.graph.node[0]\n atts = {}\n for att in node.attribute:\n atts[att.name] = get_attribute_value(att)\n container.add_node(\n node.op_type, [operator.inputs[0].full_name],\n [operator.outputs[0].full_name, operator.outputs[1].full_name],\n op_domain=node.domain, op_version=opsets.get(node.domain, None),\n **atts)\n\n\nupdate_registered_converter(\n CatBoostClassifier,\n 'CatBoostCatBoostClassifier',\n calculate_linear_classifier_output_shapes,\n skl2onnx_convert_catboost,\n parser=skl2onnx_parser_castboost_classifier,\n options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = convert_sklearn(\n pipe, 'pipeline_catboost',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\n\n# And save.\nwith open(\"pipeline_catboost.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Compare the predictions\n\nPredictions with CatBoost.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"predict\", pipe.predict(X[:5]))\nprint(\"predict_proba\", pipe.predict_proba(X[:1]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Predictions with onnxruntime.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = rt.InferenceSession(\"pipeline_catboost.onnx\")\n\npred_onx = sess.run(None, {\"input\": X[:5].astype(numpy.float32)})\nprint(\"predict\", pred_onx[0])\nprint(\"predict_proba\", pred_onx[1][:1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(model_onnx)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + 
"language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/_downloads/a21caf23a18df75a6991a3b7db5b83c6/plot_convert_decision_function.py b/_downloads/a21caf23a18df75a6991a3b7db5b83c6/plot_convert_decision_function.py index afc9259fd..8c15a70fb 100644 --- a/_downloads/a21caf23a18df75a6991a3b7db5b83c6/plot_convert_decision_function.py +++ b/_downloads/a21caf23a18df75a6991a3b7db5b83c6/plot_convert_decision_function.py @@ -15,9 +15,6 @@ is used to change the default behaviour. Let's see that on a simple example. -.. contents:: - :local: - Train a model and convert it ++++++++++++++++++++++++++++ @@ -51,7 +48,8 @@ # Let's confirm the output type of the probabilities # is a list of dictionaries with onnxruntime. -sess = rt.InferenceSession(onx.SerializeToString()) +sess = rt.InferenceSession(onx.SerializeToString(), + providers=["CPUExecutionProvider"]) res = sess.run(None, {'float_input': X_test.astype(numpy.float32)}) print("skl", clr.predict_proba(X_test[:1])) print("onnx", res[1][:2]) @@ -66,7 +64,8 @@ onx2 = convert_sklearn(clr, initial_types=initial_type, options=options, target_opset=12) -sess2 = rt.InferenceSession(onx2.SerializeToString()) +sess2 = rt.InferenceSession(onx2.SerializeToString(), + providers=["CPUExecutionProvider"]) res2 = sess2.run(None, {'float_input': X_test.astype(numpy.float32)}) print("skl", clr.decision_function(X_test[:1])) print("onnx", res2[1][:2]) diff --git a/_downloads/a2a107f52312f36852a93dbbb6e1ae43/plot_intermediate_outputs.py b/_downloads/a2a107f52312f36852a93dbbb6e1ae43/plot_intermediate_outputs.py index 2b8a2d829..deb5483ed 100644 --- a/_downloads/a2a107f52312f36852a93dbbb6e1ae43/plot_intermediate_outputs.py +++ b/_downloads/a2a107f52312f36852a93dbbb6e1ae43/plot_intermediate_outputs.py @@ -12,9 +12,6 @@ One option is to look into the output of every node of the ONNX graph. -.. contents:: - :local: - Create and train a complex pipeline +++++++++++++++++++++++++++++++++++ diff --git a/_downloads/a5d11f3c18c8ea574c77d2dcbb8c37d5/plot_custom_model.py b/_downloads/a5d11f3c18c8ea574c77d2dcbb8c37d5/plot_custom_model.py index e4a3043b5..a2a687ad5 100644 --- a/_downloads/a5d11f3c18c8ea574c77d2dcbb8c37d5/plot_custom_model.py +++ b/_downloads/a5d11f3c18c8ea574c77d2dcbb8c37d5/plot_custom_model.py @@ -23,10 +23,6 @@ This example proposes a way to train a machine learned model which approximates the outputs of a *t-SNE* transformer. - -.. contents:: - :local: - Implementation of the new transform +++++++++++++++++++++++++++++++++++ diff --git a/_downloads/a63a19c27b1509bd3e8bda73ee6c752b/plot_custom_parser.py b/_downloads/a63a19c27b1509bd3e8bda73ee6c752b/plot_custom_parser.py index 2b246cb3f..d79b32bad 100644 --- a/_downloads/a63a19c27b1509bd3e8bda73ee6c752b/plot_custom_parser.py +++ b/_downloads/a63a19c27b1509bd3e8bda73ee6c752b/plot_custom_parser.py @@ -14,9 +14,6 @@ above a given threshold. That's implemented in method *validate*. -.. contents:: - :local: - Iris and scoring ++++++++++++++++ @@ -156,9 +153,19 @@ def validator_classifier_converter(scope, operator, container): # We now handle the validation. 
val_max = scope.get_unique_variable_name('val_max') - container.add_node('ReduceMax', val_prob.full_name, val_max, - name=scope.get_unique_operator_name('ReduceMax'), - axes=[1], keepdims=0) + if container.target_opset >= 18: + axis_name = scope.get_unique_variable_name('axis') + container.add_initializer( + axis_name, onnx_proto.TensorProto.INT64, [1], [1]) + container.add_node( + 'ReduceMax', [val_prob.full_name, axis_name], val_max, + name=scope.get_unique_operator_name('ReduceMax'), + keepdims=0) + else: + container.add_node( + 'ReduceMax', val_prob.full_name, val_max, + name=scope.get_unique_operator_name('ReduceMax'), + axes=[1], keepdims=0) th_name = scope.get_unique_variable_name('threshold') container.add_initializer( diff --git a/_downloads/a64427b155085c7393f301488fba90cc/plot_black_op.py b/_downloads/a64427b155085c7393f301488fba90cc/plot_black_op.py index 1ab04c24c..5969a7406 100644 --- a/_downloads/a64427b155085c7393f301488fba90cc/plot_black_op.py +++ b/_downloads/a64427b155085c7393f301488fba90cc/plot_black_op.py @@ -13,9 +13,6 @@ Some converters may convert a model in different ways if the users wants to blacklist some operators. -.. contents:: - :local: - GaussianMixture +++++++++++++++ @@ -49,7 +46,8 @@ model, X_train[:1].astype(np.float32), options={id(model): {'score_samples': True}}, target_opset=12) -sess = InferenceSession(model_onnx.SerializeToString()) +sess = InferenceSession(model_onnx.SerializeToString(), + providers=["CPUExecutionProvider"]) xt = X_test[:5].astype(np.float32) print(model.score_samples(xt)) @@ -86,7 +84,8 @@ options={id(model): {'score_samples': True}}, black_op={'ReduceLogSumExp'}, target_opset=12) -sess2 = InferenceSession(model_onnx2.SerializeToString()) +sess2 = InferenceSession(model_onnx2.SerializeToString(), + providers=["CPUExecutionProvider"]) xt = X_test[:5].astype(np.float32) print(model.score_samples(xt)) diff --git a/_downloads/adc88c5b05aa4301f4767ea809e0d852/plot_lcustom_options.ipynb b/_downloads/adc88c5b05aa4301f4767ea809e0d852/plot_lcustom_options.ipynb index 3476a8231..4fbc3cad0 100644 --- a/_downloads/adc88c5b05aa4301f4767ea809e0d852/plot_lcustom_options.ipynb +++ b/_downloads/adc88c5b05aa4301f4767ea809e0d852/plot_lcustom_options.ipynb @@ -1,144 +1,144 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# A new converter with options\n\n.. index:: options\n\nOptions are used to implement different conversion\nfor a same model. The options can be used to replace\nan operator *MatMul* by the *Gemm* operator and compare the\nprocessing time for both graph. Let's see how to retrieve\nthe options within a converter.\n\nExample `l-plot-custom-converter` implements a converter\nwhich uses operator *MatMul*. 
Option *use_gemm* is used to\nreplace *MatMul* by *Gemm*.\n\n## Custom model\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from mlprodict.onnxrt import OnnxInference\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom pandas import DataFrame\nfrom skl2onnx.tutorial import measure_time\nimport numpy\nfrom onnxruntime import InferenceSession\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\nfrom skl2onnx import update_registered_converter\nfrom skl2onnx.common.data_types import guess_numpy_type\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxSub, OnnxMatMul, OnnxGemm)\nfrom skl2onnx import to_onnx\n\n\nclass DecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.mean_`: average\n * `self.coef_`: square root of the coveriance matrix\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n if sample_weights is not None:\n raise NotImplementedError(\n \"sample_weights != None is not implemented.\")\n self.mean_ = numpy.mean(X, axis=0, keepdims=True)\n X = X - self.mean_\n V = X.T @ X / X.shape[0]\n if self.alpha != 0:\n V += numpy.identity(V.shape[0]) * self.alpha\n L, P = numpy.linalg.eig(V)\n Linv = L ** (-0.5)\n diag = numpy.diag(Linv)\n root = P @ diag @ P.transpose()\n self.coef_ = root\n return self\n\n def transform(self, X):\n return (X - self.mean_) @ self.coef_\n\n\ndata = load_iris()\nX = data.data\n\ndec = DecorrelateTransformer()\ndec.fit(X)\npred = dec.transform(X[:5])\nprint(pred)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion into ONNX\n\nLet's try to convert it and see what happens.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def decorrelate_transformer_shape_calculator(operator):\n op = operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n input_dim = operator.inputs[0].type.shape[0]\n output_type = input_type([input_dim, op.coef_.shape[1]])\n operator.outputs[0].type = output_type\n\n\ndef decorrelate_transformer_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n X = operator.inputs[0]\n\n dtype = guess_numpy_type(X.type)\n options = container.get_options(op, dict(use_gemm=False))\n use_gemm = options['use_gemm']\n print('conversion: use_gemm=', use_gemm)\n\n if use_gemm:\n Y = OnnxGemm(X, op.coef_.astype(dtype),\n (- op.mean_ @ op.coef_).astype(dtype),\n op_version=opv, alpha=1., beta=1.,\n output_names=out[:1])\n else:\n Y = OnnxMatMul(\n OnnxSub(X, op.mean_.astype(dtype), op_version=opv),\n op.coef_.astype(dtype),\n op_version=opv, output_names=out[:1])\n Y.add_to(scope, container)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The registration needs to declare the options\nsupported by the converted.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(\n DecorrelateTransformer, \"SklearnDecorrelateTransformer\",\n 
decorrelate_transformer_shape_calculator,\n decorrelate_transformer_converter,\n options={'use_gemm': [True, False]})\n\n\nonx = to_onnx(dec, X.astype(numpy.float32))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\ngot = sess.run(None, {'X': X.astype(numpy.float32)})[0]\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, got))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We try the non default option, `use_gemm: True`.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx2 = to_onnx(dec, X.astype(numpy.float32),\n options={'use_gemm': True})\n\nsess2 = InferenceSession(onx2.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\ngot2 = sess2.run(None, {'X': X.astype(numpy.float32)})[0]\n\nprint(diff(exp, got2))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Visually.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf = OnnxInference(onx2)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Time comparison\n\nLet's compare the two computation.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X32 = X.astype(numpy.float32)\nobs = []\n\ncontext = {'sess': sess, 'X32': X32}\nmt = measure_time(\n \"sess.run(None, {'X': X32})\", context, div_by_number=True,\n number=100, repeat=1000)\nmt['use_gemm'] = False\nobs.append(mt)\n\ncontext = {'sess2': sess2, 'X32': X32}\nmt2 = measure_time(\n \"sess2.run(None, {'X': X32})\", context, div_by_number=True,\n number=10, repeat=100)\nmt2['use_gemm'] = True\nobs.append(mt2)\n\nDataFrame(obs).T" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# A new converter with options\n\n.. index:: options\n\nOptions are used to implement different conversion\nfor a same model. The options can be used to replace\nan operator *MatMul* by the *Gemm* operator and compare the\nprocessing time for both graph. Let's see how to retrieve\nthe options within a converter.\n\nExample `l-plot-custom-converter` implements a converter\nwhich uses operator *MatMul*. 
Option *use_gemm* is used to\nreplace *MatMul* by *Gemm*.\n\n## Custom model\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from mlprodict.onnxrt import OnnxInference\nfrom pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom pandas import DataFrame\nfrom skl2onnx.tutorial import measure_time\nimport numpy\nfrom onnxruntime import InferenceSession\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\nfrom skl2onnx import update_registered_converter\nfrom skl2onnx.common.data_types import guess_numpy_type\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxSub, OnnxMatMul, OnnxGemm)\nfrom skl2onnx import to_onnx\n\n\nclass DecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.mean_`: average\n * `self.coef_`: square root of the coveriance matrix\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n if sample_weights is not None:\n raise NotImplementedError(\n \"sample_weights != None is not implemented.\")\n self.mean_ = numpy.mean(X, axis=0, keepdims=True)\n X = X - self.mean_\n V = X.T @ X / X.shape[0]\n if self.alpha != 0:\n V += numpy.identity(V.shape[0]) * self.alpha\n L, P = numpy.linalg.eig(V)\n Linv = L ** (-0.5)\n diag = numpy.diag(Linv)\n root = P @ diag @ P.transpose()\n self.coef_ = root\n return self\n\n def transform(self, X):\n return (X - self.mean_) @ self.coef_\n\n\ndata = load_iris()\nX = data.data\n\ndec = DecorrelateTransformer()\ndec.fit(X)\npred = dec.transform(X[:5])\nprint(pred)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion into ONNX\n\nLet's try to convert it and see what happens.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def decorrelate_transformer_shape_calculator(operator):\n op = operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n input_dim = operator.inputs[0].type.shape[0]\n output_type = input_type([input_dim, op.coef_.shape[1]])\n operator.outputs[0].type = output_type\n\n\ndef decorrelate_transformer_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n X = operator.inputs[0]\n\n dtype = guess_numpy_type(X.type)\n options = container.get_options(op, dict(use_gemm=False))\n use_gemm = options['use_gemm']\n print('conversion: use_gemm=', use_gemm)\n\n if use_gemm:\n Y = OnnxGemm(X, op.coef_.astype(dtype),\n (- op.mean_ @ op.coef_).astype(dtype),\n op_version=opv, alpha=1., beta=1.,\n output_names=out[:1])\n else:\n Y = OnnxMatMul(\n OnnxSub(X, op.mean_.astype(dtype), op_version=opv),\n op.coef_.astype(dtype),\n op_version=opv, output_names=out[:1])\n Y.add_to(scope, container)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The registration needs to declare the options\nsupported by the converted.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(\n DecorrelateTransformer, \"SklearnDecorrelateTransformer\",\n 
decorrelate_transformer_shape_calculator,\n decorrelate_transformer_converter,\n options={'use_gemm': [True, False]})\n\n\nonx = to_onnx(dec, X.astype(numpy.float32))\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\ngot = sess.run(None, {'X': X.astype(numpy.float32)})[0]\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, got))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We try the non default option, `use_gemm: True`.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx2 = to_onnx(dec, X.astype(numpy.float32),\n options={'use_gemm': True})\n\nsess2 = InferenceSession(onx2.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\ngot2 = sess2.run(None, {'X': X.astype(numpy.float32)})[0]\n\nprint(diff(exp, got2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Visually.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(onx2)\nax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Time comparison\n\nLet's compare the two computation.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X32 = X.astype(numpy.float32)\nobs = []\n\ncontext = {'sess': sess, 'X32': X32}\nmt = measure_time(\n \"sess.run(None, {'X': X32})\", context, div_by_number=True,\n number=100, repeat=1000)\nmt['use_gemm'] = False\nobs.append(mt)\n\ncontext = {'sess2': sess2, 'X32': X32}\nmt2 = measure_time(\n \"sess2.run(None, {'X': X32})\", context, div_by_number=True,\n number=10, repeat=100)\nmt2['use_gemm'] = True\nobs.append(mt2)\n\nDataFrame(obs).T" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/ae426f7c0b6f299abebaf6024b7b02c3/plot_benchmark_pipeline.py b/_downloads/ae426f7c0b6f299abebaf6024b7b02c3/plot_benchmark_pipeline.py index 45b132223..8db485cc8 100644 --- a/_downloads/ae426f7c0b6f299abebaf6024b7b02c3/plot_benchmark_pipeline.py +++ b/_downloads/ae426f7c0b6f299abebaf6024b7b02c3/plot_benchmark_pipeline.py @@ -8,9 +8,6 @@ The following example checks up on every step in a pipeline, compares and benchmarks the predictions. -.. 
contents:: - :local: - Create a pipeline +++++++++++++++++ @@ -60,7 +57,8 @@ model_onnx = convert_sklearn(pipe, initial_types=initial_types, target_opset=12) -sess = rt.InferenceSession(model_onnx.SerializeToString()) +sess = rt.InferenceSession(model_onnx.SerializeToString(), + providers=["CPUExecutionProvider"]) print("skl predict_proba") print(pipe.predict_proba(X_digits[:2])) onx_pred = sess.run(None, {'input': X_digits[:2].astype(np.float32)})[1] @@ -106,7 +104,8 @@ for i, step in enumerate(steps): onnx_step = step['onnx_step'] - sess = rt.InferenceSession(onnx_step.SerializeToString()) + sess = rt.InferenceSession(onnx_step.SerializeToString(), + providers=["CPUExecutionProvider"]) onnx_outputs = sess.run(None, {'input': X_digits[:2].astype(np.float32)}) skl_outputs = step['model']._debug.outputs if 'transform' in skl_outputs: diff --git a/_downloads/aed99bc8ade3f0f8a99b9ca3cc28d31e/plot_gpr.ipynb b/_downloads/aed99bc8ade3f0f8a99b9ca3cc28d31e/plot_gpr.ipynb index 785ea1602..70ee86beb 100644 --- a/_downloads/aed99bc8ade3f0f8a99b9ca3cc28d31e/plot_gpr.ipynb +++ b/_downloads/aed99bc8ade3f0f8a99b9ca3cc28d31e/plot_gpr.ipynb @@ -1,252 +1,252 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Discrepencies with GaussianProcessorRegressor: use of double\n\nThe `GaussianProcessRegressor\n`_ involves\nmany matrix operations which may requires double\nprecisions. *sklearn-onnx* is using single floats by default\nbut for this particular model, it is better to use double.\nLet's see how to create an ONNX file using doubles.\n\n## Train a model\n\nA very basic example using *GaussianProcessRegressor*\non the Boston dataset.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import pprint\nimport numpy\nimport sklearn\nfrom sklearn.datasets import load_boston\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import DotProduct, RBF\nfrom sklearn.model_selection import train_test_split\nimport onnx\nimport onnxruntime as rt\nimport skl2onnx\nfrom skl2onnx.common.data_types import FloatTensorType, DoubleTensorType\nfrom skl2onnx import convert_sklearn\n\nbost = load_boston()\nX, y = bost.data, bost.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\ngpr = GaussianProcessRegressor(DotProduct() + RBF(), alpha=1.)\ngpr.fit(X_train, y_train)\nprint(gpr)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## First attempt to convert a model into ONNX\n\nThe documentation suggests the following way to\nconvert a model into ONNX.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "initial_type = [('X', FloatTensorType([None, X_train.shape[1]]))]\nonx = convert_sklearn(gpr, initial_types=initial_type,\n target_opset=12)\n\nsess = rt.InferenceSession(onx.SerializeToString())\ntry:\n pred_onx = sess.run(\n None, {'X': X_test.astype(numpy.float32)})[0]\nexcept RuntimeError as e:\n print(str(e))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Second attempt: variable dimensions\n\nUnfortunately, even though the conversion\nwent well, the runtime fails to compute the prediction.\nThe previous snippet of code imposes 
fixed dimension\non the input and therefore let the runtime assume\nevery node output has outputs with fixed dimensions\nAnd that's not the case for this model.\nWe need to disable these checkings by replacing\nthe fixed dimensions by an empty value.\n(see next line).\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "initial_type = [('X', FloatTensorType([None, None]))]\nonx = convert_sklearn(gpr, initial_types=initial_type,\n target_opset=12)\n\nsess = rt.InferenceSession(onx.SerializeToString())\npred_onx = sess.run(\n None, {'X': X_test.astype(numpy.float32)})[0]\n\npred_skl = gpr.predict(X_test)\nprint(pred_skl[:10])\nprint(pred_onx[0, :10])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The differences seems quite important.\nLet's confirm that by looking at the biggest\ndifferences.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "diff = numpy.sort(numpy.abs(numpy.squeeze(pred_skl) -\n numpy.squeeze(pred_onx)))[-5:]\nprint(diff)\nprint('min(Y)-max(Y):', min(y_test), max(y_test))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Third attempt: use of double\n\nThe model uses a couple of matrix computations\nand matrices have coefficients with very different\norder of magnitude. It is difficult to approximate\nthe prediction made with scikit-learn if the converted\nmodel sticks to float. Double precision is needed.\n\nThe previous code requires two changes. The first\none indicates that inputs are now of type\n``DoubleTensorType``. The second change\nis the extra parameter ``dtype=numpy.float64``\ntells the conversion function that every real\nconstant matrix such as the trained coefficients\nwill be dumped as doubles and not as floats anymore.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "initial_type = [('X', DoubleTensorType([None, None]))]\nonx64 = convert_sklearn(gpr, initial_types=initial_type,\n target_opset=12)\n\nsess64 = rt.InferenceSession(onx64.SerializeToString())\npred_onx64 = sess64.run(None, {'X': X_test})[0]\n\nprint(pred_onx64[0, :10])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The new differences look much better.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "diff = numpy.sort(numpy.abs(numpy.squeeze(pred_skl) -\n numpy.squeeze(pred_onx64)))[-5:]\nprint(diff)\nprint('min(Y)-max(Y):', min(y_test), max(y_test))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Size increase\n\nAs a result, the ONNX model is almost twice bigger\nbecause every coefficient is stored as double and\nand not as floats anymore.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "size32 = len(onx.SerializeToString())\nsize64 = len(onx64.SerializeToString())\nprint(\"ONNX with floats:\", size32)\nprint(\"ONNX with doubles:\", size64)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## return_std=True\n\n`GaussianProcessRegressor `_\nis one model which defined additional parameter to the predict function.\nIf call with ``return_std=True``, the class returns one more results\nand that 
needs to be reflected into the generated ONNX graph.\nThe converter needs to know that an extended graph is required.\nThat's done through the option mechanism\n(see `l-conv-options`).\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "initial_type = [('X', DoubleTensorType([None, None]))]\noptions = {GaussianProcessRegressor: {'return_std': True}}\ntry:\n onx64_std = convert_sklearn(gpr, initial_types=initial_type,\n options=options, target_opset=12)\nexcept RuntimeError as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This error highlights the fact that the *scikit-learn*\ncomputes internal variables on first call to method predict.\nThe converter needs them to be initialized by calling method\npredict at least once and then converting again.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "gpr.predict(X_test[:1], return_std=True)\nonx64_std = convert_sklearn(gpr, initial_types=initial_type,\n options=options, target_opset=12)\n\nsess64_std = rt.InferenceSession(onx64_std.SerializeToString())\npred_onx64_std = sess64_std.run(None, {'X': X_test[:5]})\n\npprint.pprint(pred_onx64_std)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's compare with *scikit-learn* prediction.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pprint.pprint(gpr.predict(X_test[:5], return_std=True))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It looks good. Let's do a better checks.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pred_onx64_std = sess64_std.run(None, {'X': X_test})\npred_std = gpr.predict(X_test, return_std=True)\n\n\ndiff = numpy.sort(numpy.abs(numpy.squeeze(pred_onx64_std[1]) -\n numpy.squeeze(pred_std[1])))[-5:]\nprint(diff)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There are some discrepencies but it seems reasonable.\n\n**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Discrepencies with GaussianProcessorRegressor: use of double\n\nThe [GaussianProcessRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.\nGaussianProcessRegressor.html) involves\nmany matrix 
operations which may requires double\nprecisions. *sklearn-onnx* is using single floats by default\nbut for this particular model, it is better to use double.\nLet's see how to create an ONNX file using doubles.\n\n## Train a model\n\nA very basic example using *GaussianProcessRegressor*\non the Boston dataset.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import pprint\nimport numpy\nimport sklearn\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import DotProduct, RBF\nfrom sklearn.model_selection import train_test_split\nimport onnx\nimport onnxruntime as rt\nimport skl2onnx\nfrom skl2onnx.common.data_types import FloatTensorType, DoubleTensorType\nfrom skl2onnx import convert_sklearn\n\ndataset = load_diabetes()\nX, y = dataset.data, dataset.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\ngpr = GaussianProcessRegressor(DotProduct() + RBF(), alpha=1.)\ngpr.fit(X_train, y_train)\nprint(gpr)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## First attempt to convert a model into ONNX\n\nThe documentation suggests the following way to\nconvert a model into ONNX.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "initial_type = [('X', FloatTensorType([None, X_train.shape[1]]))]\nonx = convert_sklearn(gpr, initial_types=initial_type,\n target_opset=12)\n\nsess = rt.InferenceSession(onx.SerializeToString())\ntry:\n pred_onx = sess.run(\n None, {'X': X_test.astype(numpy.float32)})[0]\nexcept RuntimeError as e:\n print(str(e))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Second attempt: variable dimensions\n\nUnfortunately, even though the conversion\nwent well, the runtime fails to compute the prediction.\nThe previous snippet of code imposes fixed dimension\non the input and therefore let the runtime assume\nevery node output has outputs with fixed dimensions\nAnd that's not the case for this model.\nWe need to disable these checkings by replacing\nthe fixed dimensions by an empty value.\n(see next line).\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "initial_type = [('X', FloatTensorType([None, None]))]\nonx = convert_sklearn(gpr, initial_types=initial_type,\n target_opset=12)\n\nsess = rt.InferenceSession(onx.SerializeToString())\npred_onx = sess.run(\n None, {'X': X_test.astype(numpy.float32)})[0]\n\npred_skl = gpr.predict(X_test)\nprint(pred_skl[:10])\nprint(pred_onx[0, :10])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The differences seems quite important.\nLet's confirm that by looking at the biggest\ndifferences.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "diff = numpy.sort(numpy.abs(numpy.squeeze(pred_skl) -\n numpy.squeeze(pred_onx)))[-5:]\nprint(diff)\nprint('min(Y)-max(Y):', min(y_test), max(y_test))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Third attempt: use of double\n\nThe model uses a couple of matrix computations\nand matrices have coefficients with very different\norder of magnitude. 
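(Editor-added illustration, not part of the original notebook: the precision loss that motivates the switch to double can be seen with two plain numpy scalars of very different magnitude. Nothing below depends on the trained model.)

```python
import numpy

big, small = 1e8, 1.0

# Double precision keeps roughly 16 significant digits:
# the small term survives the addition.
print(numpy.float64(big) + numpy.float64(small) - numpy.float64(big))  # 1.0

# Single precision keeps only about 7 significant digits:
# the small term is rounded away entirely.
print(numpy.float32(big) + numpy.float32(small) - numpy.float32(big))  # 0.0
```

When matrices mixing such magnitudes are multiplied and solved, these rounding errors accumulate, which is why the float model drifts away from the double-precision predictions.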
It is difficult to approximate\nthe prediction made with scikit-learn if the converted\nmodel sticks to float. Double precision is needed.\n\nThe previous code requires two changes. The first\none indicates that inputs are now of type\n``DoubleTensorType``. The second change\nis the extra parameter ``dtype=numpy.float64``\ntells the conversion function that every real\nconstant matrix such as the trained coefficients\nwill be dumped as doubles and not as floats anymore.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "initial_type = [('X', DoubleTensorType([None, None]))]\nonx64 = convert_sklearn(gpr, initial_types=initial_type,\n target_opset=12)\n\nsess64 = rt.InferenceSession(onx64.SerializeToString())\npred_onx64 = sess64.run(None, {'X': X_test})[0]\n\nprint(pred_onx64[0, :10])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The new differences look much better.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "diff = numpy.sort(numpy.abs(numpy.squeeze(pred_skl) -\n numpy.squeeze(pred_onx64)))[-5:]\nprint(diff)\nprint('min(Y)-max(Y):', min(y_test), max(y_test))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Size increase\n\nAs a result, the ONNX model is almost twice bigger\nbecause every coefficient is stored as double and\nand not as floats anymore.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "size32 = len(onx.SerializeToString())\nsize64 = len(onx64.SerializeToString())\nprint(\"ONNX with floats:\", size32)\nprint(\"ONNX with doubles:\", size64)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## return_std=True\n\n[GaussianProcessRegressor](https://scikit-learn.org/stable/modules/\ngenerated/sklearn.gaussian_process.GaussianProcessRegressor.html)\nis one model which defined additional parameter to the predict function.\nIf call with ``return_std=True``, the class returns one more results\nand that needs to be reflected into the generated ONNX graph.\nThe converter needs to know that an extended graph is required.\nThat's done through the option mechanism\n(see `l-conv-options`).\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "initial_type = [('X', DoubleTensorType([None, None]))]\noptions = {GaussianProcessRegressor: {'return_std': True}}\ntry:\n onx64_std = convert_sklearn(gpr, initial_types=initial_type,\n options=options, target_opset=12)\nexcept RuntimeError as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This error highlights the fact that the *scikit-learn*\ncomputes internal variables on first call to method predict.\nThe converter needs them to be initialized by calling method\npredict at least once and then converting again.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "gpr.predict(X_test[:1], return_std=True)\nonx64_std = convert_sklearn(gpr, initial_types=initial_type,\n options=options, target_opset=12)\n\nsess64_std = rt.InferenceSession(onx64_std.SerializeToString())\npred_onx64_std = sess64_std.run(None, {'X': X_test[:5]})\n\npprint.pprint(pred_onx64_std)" + ] + }, 
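(Editor-added check, reusing the ``sess64_std`` session created in the cell above; the exact output names depend on the converter version.) The extended graph should now expose two outputs, the predicted mean and the standard deviation:

```python
# List the outputs of the ONNX model converted with return_std=True.
# With the option enabled there should be two of them.
for out in sess64_std.get_outputs():
    print(out.name, out.shape, out.type)
```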
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's compare with *scikit-learn* prediction.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pprint.pprint(gpr.predict(X_test[:5], return_std=True))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It looks good. Let's do a better checks.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pred_onx64_std = sess64_std.run(None, {'X': X_test})\npred_std = gpr.predict(X_test, return_std=True)\n\n\ndiff = numpy.sort(numpy.abs(numpy.squeeze(pred_onx64_std[1]) -\n numpy.squeeze(pred_std[1])))[-5:]\nprint(diff)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There are some discrepencies but it seems reasonable.\n\n**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/b63a09caa8878af3c7bdcc0676f2c138/plot_ebegin_float_double.ipynb b/_downloads/b63a09caa8878af3c7bdcc0676f2c138/plot_ebegin_float_double.ipynb index 243737758..dfa30fb72 100644 --- a/_downloads/b63a09caa8878af3c7bdcc0676f2c138/plot_ebegin_float_double.ipynb +++ b/_downloads/b63a09caa8878af3c7bdcc0676f2c138/plot_ebegin_float_double.ipynb @@ -1,266 +1,180 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Issues when switching to float\n\n.. index:: float, double, discrepencies\n\nMost models in :epkg:`scikit-learn` do computation with double,\nnot float. Most models in deep learning use float because\nthat's the most common situation with GPU. ONNX was initially\ncreated to facilitate the deployment of deep learning models\nand that explains why many converters assume the converted models\nshould use float. That assumption does not usually harm\nthe predictions, the conversion to float introduce small\ndiscrepencies compare to double predictions.\nThat assumption is usually true if the prediction\nfunction is continuous, $y = f(x)$, then\n$dy = f'(x) dx$. We can determine an upper bound\nto the discrepencies :\n$\\Delta(y) \\leqslant \\sup_x \\left\\Vert f'(x)\\right\\Vert dx$.\n*dx* is the discrepency introduced by a float conversion,\n``dx = x - numpy.float32(x)``.\n\nHowever, that's not the case for every model. A decision tree\ntrained for a regression is not a continuous function. Therefore,\neven a small *dx* may introduce a huge discrepency. 
Let's look into\nan example which always produces discrepencies and some ways\nto overcome this situation.\n\n## More into the issue\n\nThe below example is built to fail.\nIt contains integer features with different order\nof magnitude rounded to integer. A decision tree compares\nfeatures to thresholds. In most cases, float and double\ncomparison gives the same result. We denote\n$[x]_{f32}$ the conversion (or cast)\n``numpy.float32(x)``.\n\n\\begin{align}x \\leqslant y = [x]_{f32} \\leqslant [y]_{f32}\\end{align}\n\nHowever, the probability that both comparisons give\ndifferent results is not null. The following graph shows\nthe discord areas.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from mlprodict.sklapi import OnnxPipeline\nfrom skl2onnx.sklapi import CastTransformer, CastRegressor\nfrom skl2onnx import to_onnx\nfrom mlprodict.onnx_conv import to_onnx as to_onnx_extended\nfrom mlprodict.onnxrt import OnnxInference\nfrom onnxruntime import InferenceSession\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.datasets import make_regression\nimport numpy\nimport matplotlib.pyplot as plt\n\n\ndef area_mismatch_rule(N, delta, factor, rule=None):\n if rule is None:\n def rule(t): return numpy.float32(t)\n xst = []\n yst = []\n xsf = []\n ysf = []\n for x in range(-N, N):\n for y in range(-N, N):\n dx = (1. + x * delta) * factor\n dy = (1. + y * delta) * factor\n c1 = 1 if numpy.float64(dx) <= numpy.float64(dy) else 0\n c2 = 1 if numpy.float32(dx) <= rule(dy) else 0\n key = abs(c1 - c2)\n if key == 1:\n xsf.append(dx)\n ysf.append(dy)\n else:\n xst.append(dx)\n yst.append(dy)\n return xst, yst, xsf, ysf\n\n\ndelta = 36e-10\nfactor = 1\nxst, yst, xsf, ysf = area_mismatch_rule(100, delta, factor)\n\n\nfig, ax = plt.subplots(1, 1, figsize=(5, 5))\nax.plot(xst, yst, '.', label=\"agree\")\nax.plot(xsf, ysf, '.', label=\"disagree\")\nax.set_title(\"Region where x <= y and (float)x <= (float)y agree\")\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.plot([min(xst), max(xst)], [min(yst), max(yst)], 'k--')\nax.legend()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## The pipeline and the data\n\nWe can now build an example where the learned decision tree\ndoes many comparisons in this discord area. 
This is done\nby rounding features to integers, a frequent case\nhappening when dealing with categorical features.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X, y = make_regression(10000, 10)\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\nXi_train, yi_train = X_train.copy(), y_train.copy()\nXi_test, yi_test = X_test.copy(), y_test.copy()\nfor i in range(X.shape[1]):\n Xi_train[:, i] = (Xi_train[:, i] * 2 ** i).astype(numpy.int64)\n Xi_test[:, i] = (Xi_test[:, i] * 2 ** i).astype(numpy.int64)\n\nmax_depth = 10\n\nmodel = Pipeline([\n ('scaler', StandardScaler()),\n ('dt', DecisionTreeRegressor(max_depth=max_depth))\n])\n\nmodel.fit(Xi_train, yi_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## The discrepencies\n\nLet's reuse the function implemented in the\nfirst example `l-diff-dicrepencies` and\nlook into the conversion.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nonx = to_onnx(model, Xi_train[:1].astype(numpy.float32))\n\nsess = InferenceSession(onx.SerializeToString())\n\nX32 = Xi_test.astype(numpy.float32)\n\nskl = model.predict(X32)\nort = sess.run(None, {'X': X32})[0]\n\nprint(diff(skl, ort))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The discrepencies are significant.\nThe ONNX model keeps float at every step.\n\n.. blockdiag::\n\n diagram {\n x_float32 -> normalizer -> y_float32 -> dtree -> z_float32\n }\n\nIn :epkg:`scikit-learn`:\n\n.. blockdiag::\n\n diagram {\n x_float32 -> normalizer -> y_double -> dtree -> z_double\n }\n\n## CastTransformer\n\nWe could try to use double everywhere. Unfortunately,\n:epkg:`ONNX ML Operators` only allows float coefficients\nfor the operator *TreeEnsembleRegressor*. We may want\nto compromise by casting the output of the normalizer into\nfloat in the :epkg:`scikit-learn` pipeline.\n\n.. blockdiag::\n\n diagram {\n x_float32 -> normalizer -> y_double ->\n cast -> y_float -> dtree -> z_float\n }\n\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model2 = Pipeline([\n ('scaler', StandardScaler()),\n ('cast', CastTransformer()),\n ('dt', DecisionTreeRegressor(max_depth=max_depth))\n])\n\nmodel2.fit(Xi_train, yi_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The discrepencies.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx2 = to_onnx(model2, Xi_train[:1].astype(numpy.float32))\n\nsess2 = InferenceSession(onx2.SerializeToString())\n\nskl2 = model2.predict(X32)\nort2 = sess2.run(None, {'X': X32})[0]\n\nprint(diff(skl2, ort2))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "That still fails because the normalizer\nin :epkg:`scikit-learn` and in :epkg:`ONNX`\nuse different types. The cast still happens and\nthe *dx* is still here. 
To remove it, we need to use\ndouble in ONNX normalizer.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model3 = Pipeline([\n ('cast64', CastTransformer(dtype=numpy.float64)),\n ('scaler', StandardScaler()),\n ('cast', CastTransformer()),\n ('dt', DecisionTreeRegressor(max_depth=max_depth))\n])\n\nmodel3.fit(Xi_train, yi_train)\nonx3 = to_onnx(model3, Xi_train[:1].astype(numpy.float32),\n options={StandardScaler: {'div': 'div_cast'}})\n\nsess3 = InferenceSession(onx3.SerializeToString())\n\nskl3 = model3.predict(X32)\nort3 = sess3.run(None, {'X': X32})[0]\n\nprint(diff(skl3, ort3))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It works. That also means that it is difficult to change\nthe computation type when a pipeline includes a discontinuous\nfunction. It is better to keep the same types all along\nbefore using a decision tree.\n\n## Sledgehammer\n\nThe idea here is to always train the next step based\non ONNX outputs. That way, every step of the pipeline\nis trained based on ONNX output.\n\n* Trains the first step.\n* Converts the step into ONNX\n* Computes ONNX outputs.\n* Trains the second step on these outputs.\n* Converts the second step into ONNX.\n* Merges it with the first step.\n* Computes ONNX outputs of the merged two first steps.\n* ...\n\nIt is implemented in\nclass :epkg:`OnnxPipeline`.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onx = OnnxPipeline([\n ('scaler', StandardScaler()),\n ('dt', DecisionTreeRegressor(max_depth=max_depth))\n])\n\nmodel_onx.fit(Xi_train, yi_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The conversion.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n onx4 = to_onnx(model_onx, Xi_train[:1].astype(numpy.float32))\nexcept ValueError as e:\n print(\"Failing due to %r.\\nYou need to update mlprodict.\" % e)\n import sys\n sys.exit(0)\n\nsess4 = InferenceSession(onx4.SerializeToString())\n\nskl4 = model_onx.predict(X32)\nort4 = sess4.run(None, {'X': X32})[0]\n\nprint(diff(skl4, ort4))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It works too in a more simple way.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## No discrepencies at all?\n\nIs it possible to get no error at all?\nThere is one major obstacle: :epkg:`scikit-learn`\nstores the predicted values in every leave with double\n(`_tree.pyx - _get_value_ndarray\n`_), :epkg:`ONNX` defines the\nthe predicted values as floats: :epkg:`TreeEnsembleRegressor`.\nWhat can we do to solve it?\nWhat if we could extend ONNX specifications to support\ndouble instead of floats.\nWe reuse what was developped in example\n`Other way to convert `_\nand a custom ONNX node `TreeEnsembleRegressorDouble\n`_.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "tree = DecisionTreeRegressor(max_depth=max_depth)\ntree.fit(Xi_train, yi_train)\n\nmodel_onx = to_onnx_extended(tree, Xi_train[:1].astype(numpy.float64),\n rewrite_ops=True)\n\noinf5 = OnnxInference(model_onx, runtime='python_compiled')\nprint(oinf5)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's measure the 
discrepencies.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X64 = Xi_test.astype(numpy.float64)\nskl5 = tree.predict(X64)\nort5 = oinf5.run({'X': X64})['variable']" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Perfect, no discrepencies at all.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(diff(skl5, ort5))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## CastRegressor\n\nThe previous example demonstrated the type difference for\nthe predicted values explains the small differences between\n:epkg:`scikit-learn` and :epkg:`onnxruntime`. But it does not\nwith the current ONNX. Another option is to cast the\nthe predictions into floats in the :epkg:`scikit-learn` pipeline.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ctree = CastRegressor(DecisionTreeRegressor(max_depth=max_depth))\nctree.fit(Xi_train, yi_train)\n\nonx6 = to_onnx(ctree, Xi_train[:1].astype(numpy.float32))\n\nsess6 = InferenceSession(onx6.SerializeToString())\n\nskl6 = ctree.predict(X32)\nort6 = sess6.run(None, {'X': X32})[0]\n\nprint(diff(skl6, ort6))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Success!\n\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Issues when switching to float\n\n.. index:: float, double, discrepencies\n\nMost models in :epkg:`scikit-learn` do computation with double,\nnot float. Most models in deep learning use float because\nthat's the most common situation with GPU. ONNX was initially\ncreated to facilitate the deployment of deep learning models\nand that explains why many converters assume the converted models\nshould use float. That assumption does not usually harm\nthe predictions, the conversion to float introduce small\ndiscrepencies compare to double predictions.\nThat assumption is usually true if the prediction\nfunction is continuous, $y = f(x)$, then\n$dy = f'(x) dx$. We can determine an upper bound\nto the discrepencies :\n$\\Delta(y) \\leqslant \\sup_x \\left\\Vert f'(x)\\right\\Vert dx$.\n*dx* is the discrepency introduced by a float conversion,\n``dx = x - numpy.float32(x)``.\n\nHowever, that's not the case for every model. A decision tree\ntrained for a regression is not a continuous function. Therefore,\neven a small *dx* may introduce a huge discrepency. Let's look into\nan example which always produces discrepencies and some ways\nto overcome this situation.\n\n## More into the issue\n\nThe below example is built to fail.\nIt contains integer features with different order\nof magnitude rounded to integer. A decision tree compares\nfeatures to thresholds. 
In most cases, float and double\ncomparison gives the same result. We denote\n$[x]_{f32}$ the conversion (or cast)\n``numpy.float32(x)``.\n\n\\begin{align}x \\leqslant y = [x]_{f32} \\leqslant [y]_{f32}\\end{align}\n\nHowever, the probability that both comparisons give\ndifferent results is not null. The following graph shows\nthe discord areas.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from mlprodict.sklapi import OnnxPipeline\nfrom skl2onnx.sklapi import CastTransformer\nfrom skl2onnx import to_onnx\nfrom onnxruntime import InferenceSession\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.datasets import make_regression\nimport numpy\nimport matplotlib.pyplot as plt\n\n\ndef area_mismatch_rule(N, delta, factor, rule=None):\n if rule is None:\n def rule(t): return numpy.float32(t)\n xst = []\n yst = []\n xsf = []\n ysf = []\n for x in range(-N, N):\n for y in range(-N, N):\n dx = (1. + x * delta) * factor\n dy = (1. + y * delta) * factor\n c1 = 1 if numpy.float64(dx) <= numpy.float64(dy) else 0\n c2 = 1 if numpy.float32(dx) <= rule(dy) else 0\n key = abs(c1 - c2)\n if key == 1:\n xsf.append(dx)\n ysf.append(dy)\n else:\n xst.append(dx)\n yst.append(dy)\n return xst, yst, xsf, ysf\n\n\ndelta = 36e-10\nfactor = 1\nxst, yst, xsf, ysf = area_mismatch_rule(100, delta, factor)\n\n\nfig, ax = plt.subplots(1, 1, figsize=(5, 5))\nax.plot(xst, yst, '.', label=\"agree\")\nax.plot(xsf, ysf, '.', label=\"disagree\")\nax.set_title(\"Region where x <= y and (float)x <= (float)y agree\")\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.plot([min(xst), max(xst)], [min(yst), max(yst)], 'k--')\nax.legend()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The pipeline and the data\n\nWe can now build an example where the learned decision tree\ndoes many comparisons in this discord area. 
This is done\nby rounding features to integers, a frequent case\nhappening when dealing with categorical features.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X, y = make_regression(10000, 10)\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\nXi_train, yi_train = X_train.copy(), y_train.copy()\nXi_test, yi_test = X_test.copy(), y_test.copy()\nfor i in range(X.shape[1]):\n Xi_train[:, i] = (Xi_train[:, i] * 2 ** i).astype(numpy.int64)\n Xi_test[:, i] = (Xi_test[:, i] * 2 ** i).astype(numpy.int64)\n\nmax_depth = 10\n\nmodel = Pipeline([\n ('scaler', StandardScaler()),\n ('dt', DecisionTreeRegressor(max_depth=max_depth))\n])\n\nmodel.fit(Xi_train, yi_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The discrepencies\n\nLet's reuse the function implemented in the\nfirst example `l-diff-dicrepencies` and\nlook into the conversion.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nonx = to_onnx(model, Xi_train[:1].astype(numpy.float32),\n target_opset=15)\n\nsess = InferenceSession(onx.SerializeToString())\n\nX32 = Xi_test.astype(numpy.float32)\n\nskl = model.predict(X32)\nort = sess.run(None, {'X': X32})[0]\n\nprint(diff(skl, ort))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The discrepencies are significant.\nThe ONNX model keeps float at every step.\n\n.. blockdiag::\n\n diagram {\n x_float32 -> normalizer -> y_float32 -> dtree -> z_float32\n }\n\nIn :epkg:`scikit-learn`:\n\n.. blockdiag::\n\n diagram {\n x_float32 -> normalizer -> y_double -> dtree -> z_double\n }\n\n## CastTransformer\n\nWe could try to use double everywhere. Unfortunately,\n:epkg:`ONNX ML Operators` only allows float coefficients\nfor the operator *TreeEnsembleRegressor*. We may want\nto compromise by casting the output of the normalizer into\nfloat in the :epkg:`scikit-learn` pipeline.\n\n.. blockdiag::\n\n diagram {\n x_float32 -> normalizer -> y_double ->\n cast -> y_float -> dtree -> z_float\n }\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model2 = Pipeline([\n ('scaler', StandardScaler()),\n ('cast', CastTransformer()),\n ('dt', DecisionTreeRegressor(max_depth=max_depth))\n])\n\nmodel2.fit(Xi_train, yi_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The discrepencies.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx2 = to_onnx(model2, Xi_train[:1].astype(numpy.float32),\n target_opset=15)\n\nsess2 = InferenceSession(onx2.SerializeToString())\n\nskl2 = model2.predict(X32)\nort2 = sess2.run(None, {'X': X32})[0]\n\nprint(diff(skl2, ort2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That still fails because the normalizer\nin :epkg:`scikit-learn` and in :epkg:`ONNX`\nuse different types. The cast still happens and\nthe *dx* is still here. 
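(Editor-added sketch, independent of the pipeline above; the mean and scale values are invented for the illustration.) Scaling in double precision followed by a cast to float32 does not always produce the same float32 value as scaling directly in single precision, and every mismatch is a candidate for a flipped threshold comparison:

```python
import numpy

rng = numpy.random.RandomState(0)
x = rng.uniform(0, 1, 100000).astype(numpy.float32)
mean, scale = 0.1, 0.3   # hypothetical StandardScaler parameters

# scikit-learn + CastTransformer: scale in float64, then cast the result.
y_cast = ((x.astype(numpy.float64) - mean) / scale).astype(numpy.float32)

# ONNX Scaler node: every intermediate computation stays in float32.
y_float = (x - numpy.float32(mean)) / numpy.float32(scale)

# Count how many of the resulting float32 values are not bit-identical.
print("mismatches:", int((y_cast != y_float).sum()), "out of", x.size)
```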
To remove it, we need to use\ndouble in ONNX normalizer.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model3 = Pipeline([\n ('cast64', CastTransformer(dtype=numpy.float64)),\n ('scaler', StandardScaler()),\n ('cast', CastTransformer()),\n ('dt', DecisionTreeRegressor(max_depth=max_depth))\n])\n\nmodel3.fit(Xi_train, yi_train)\nonx3 = to_onnx(model3, Xi_train[:1].astype(numpy.float32),\n options={StandardScaler: {'div': 'div_cast'}},\n target_opset=15)\n\nsess3 = InferenceSession(onx3.SerializeToString())\n\nskl3 = model3.predict(X32)\nort3 = sess3.run(None, {'X': X32})[0]\n\nprint(diff(skl3, ort3))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It works. That also means that it is difficult to change\nthe computation type when a pipeline includes a discontinuous\nfunction. It is better to keep the same types all along\nbefore using a decision tree.\n\n## Sledgehammer\n\nThe idea here is to always train the next step based\non ONNX outputs. That way, every step of the pipeline\nis trained based on ONNX output.\n\n* Trains the first step.\n* Converts the step into ONNX\n* Computes ONNX outputs.\n* Trains the second step on these outputs.\n* Converts the second step into ONNX.\n* Merges it with the first step.\n* Computes ONNX outputs of the merged two first steps.\n* ...\n\nIt is implemented in\nclass :epkg:`OnnxPipeline`.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onx = OnnxPipeline([\n ('scaler', StandardScaler()),\n ('dt', DecisionTreeRegressor(max_depth=max_depth))\n])\n\nmodel_onx.fit(Xi_train, yi_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "By using opset 17 and opset 3 for domain ai.onnx.ml, the tree thresholds\ncan be stored as double and not float anymore. That lowerss the discrepancies\neven if the outputs are still float.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx4 = to_onnx(model_onx, Xi_train[:1].astype(numpy.float32),\n target_opset=17)\n\nsess4 = InferenceSession(onx4.SerializeToString())\n\nskl4 = model_onx.predict(X32)\nort4 = sess4.run(None, {'X': X32})[0]\n\nprint(diff(skl4, ort4))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/b9af95a3109a56c3cf58515344cf9290/plot_wext_pyod_forest.ipynb b/_downloads/b9af95a3109a56c3cf58515344cf9290/plot_wext_pyod_forest.ipynb index 5ec35055c..2d0617f0a 100644 --- a/_downloads/b9af95a3109a56c3cf58515344cf9290/plot_wext_pyod_forest.ipynb +++ b/_downloads/b9af95a3109a56c3cf58515344cf9290/plot_wext_pyod_forest.ipynb @@ -1,162 +1,162 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Converter for pyod.models.iforest.IForest\n\n.. 
index:: pyod, iforest\n\nThis example answers issues `685\n`_.\nIt implements a custom converter for model `pyod.models.iforest.IForest\n`_.\nThis example uses `l-plot-custom-converter` as a start.\n\n## Trains a model\n\nAll imports. It also registered onnx converters for :epgk:`xgboost`\nand *lightgbm*.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import numpy as np\nimport pandas as pd\nfrom onnxruntime import InferenceSession\nfrom sklearn.preprocessing import MinMaxScaler\nfrom skl2onnx.proto import onnx_proto\nfrom skl2onnx.common.data_types import (\n FloatTensorType, Int64TensorType, guess_numpy_type)\nfrom skl2onnx import to_onnx, update_registered_converter, get_model_alias\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxIdentity, OnnxMul, OnnxLess, OnnxConcat, OnnxCast, OnnxAdd,\n OnnxClip)\nfrom skl2onnx.algebra.onnx_operator import OnnxSubEstimator\ntry:\n from pyod.models.iforest import IForest\nexcept (ValueError, ImportError) as e:\n print(\"Unable to import pyod:\", e)\n IForest = None\n\nif IForest is not None:\n data1 = {'First': [500, 500, 400, 100, 200, 300, 100],\n 'Second': ['a', 'b', 'a', 'b', 'a', 'b', 'c']}\n\n df1 = pd.DataFrame(data1, columns=['First', 'Second'])\n dumdf1 = pd.get_dummies(df1)\n scaler = MinMaxScaler()\n scaler.partial_fit(dumdf1)\n sc_data = scaler.transform(dumdf1)\n model1 = IForest(n_estimators=10, bootstrap=True, behaviour='new',\n contamination=0.1, random_state=np.random.RandomState(42),\n verbose=1, n_jobs=-1).fit(sc_data)\n feature_names2 = dumdf1.columns\n\n initial_type = [('float_input',\n FloatTensorType([None, len(feature_names2)]))]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We check that the conversion fails as expected.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "if IForest is not None:\n try:\n to_onnx(model1, initial_types=initial_type)\n except Exception as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom converter\n\nFirst the parser and the shape calculator.\nThe parser defines the number of outputs and their type.\nThe shape calculator defines their dimensions.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def pyod_iforest_parser(scope, model, inputs, custom_parsers=None):\n alias = get_model_alias(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n\n # inputs\n this_operator.inputs.append(inputs[0])\n\n # outputs\n cls_type = inputs[0].type.__class__\n val_y1 = scope.declare_local_variable('label', Int64TensorType())\n val_y2 = scope.declare_local_variable('probability', cls_type())\n this_operator.outputs.append(val_y1)\n this_operator.outputs.append(val_y2)\n\n # end\n return this_operator.outputs\n\n\ndef pyod_iforest_shape_calculator(operator):\n N = operator.inputs[0].get_first_dimension()\n operator.outputs[0].type.shape = [N, 1]\n operator.outputs[1].type.shape = [N, 2]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then the converter.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def pyod_iforest_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n 
out = operator.outputs\n\n # We retrieve the unique input.\n X = operator.inputs[0]\n\n # In most case, computation happen in floats.\n # But it might be with double. ONNX is very strict\n # about types, every constant should have the same\n # type as the input.\n dtype = guess_numpy_type(X.type)\n\n detector = op.detector_ # Should be IForest from scikit-learn.\n lab_pred = OnnxSubEstimator(detector, X, op_version=opv)\n scores = OnnxIdentity(lab_pred[1], op_version=opv)\n\n # labels\n threshold = op.threshold_\n above = OnnxLess(scores, np.array([threshold], dtype=dtype),\n op_version=opv)\n labels = OnnxCast(above, op_version=opv, to=onnx_proto.TensorProto.INT64,\n output_names=out[:1])\n\n # probabilities\n train_scores = op.decision_scores_\n scaler = MinMaxScaler().fit(train_scores.reshape(-1, 1))\n scores_ = OnnxMul(scores, np.array([-1], dtype=dtype),\n op_version=opv)\n print(scaler.min_)\n print(scaler.scale_)\n\n scaled = OnnxMul(scores_, scaler.scale_.astype(dtype), op_version=opv)\n scaled_centered = OnnxAdd(scaled, scaler.min_.astype(dtype),\n op_version=opv)\n clipped = OnnxClip(scaled_centered, np.array([0], dtype=dtype),\n np.array([1], dtype=dtype),\n op_version=opv)\n clipped_ = OnnxAdd(\n OnnxMul(clipped, np.array([-1], dtype=dtype),\n op_version=opv),\n np.array([1], dtype=dtype),\n op_version=opv)\n\n scores_2d = OnnxConcat(clipped_, clipped, axis=1, op_version=opv,\n output_names=out[1:])\n\n labels.add_to(scope, container)\n scores_2d.add_to(scope, container)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally the registration.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "if IForest is not None:\n update_registered_converter(\n IForest, \"PyodIForest\",\n pyod_iforest_shape_calculator,\n pyod_iforest_converter,\n parser=pyod_iforest_parser)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And the conversion.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "if IForest is not None:\n onx = to_onnx(model1, initial_types=initial_type,\n target_opset={'': 14, 'ai.onnx.ml': 2})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Checking discrepencies\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "if IForest is not None:\n data = sc_data.astype(np.float32)\n\n expected_labels = model1.predict(data)\n expected_proba = model1.predict_proba(data)\n\n sess = InferenceSession(onx.SerializeToString())\n res = sess.run(None, {'float_input': data})\n\n onx_labels = res[0]\n onx_proba = res[1]\n\n diff_labels = np.abs(onx_labels.ravel() - expected_labels.ravel()).max()\n diff_proba = np.abs(onx_proba.ravel() - expected_proba.ravel()).max()\n\n print(\"dicrepencies:\", diff_labels, diff_proba)\n\n print(\"ONNX labels\", onx_labels)\n print(\"ONNX probabilities\", onx_proba)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + 
"cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Converter for pyod.models.iforest.IForest\n\n.. index:: pyod, iforest\n\nThis example answers issues [685](https://github.com/onnx/sklearn-onnx/issues/685).\nIt implements a custom converter for model [pyod.models.iforest.IForest](https://pyod.readthedocs.io/en/latest/\npyod.models.html#module-pyod.models.iforest).\nThis example uses `l-plot-custom-converter` as a start.\n\n## Trains a model\n\nAll imports. It also registered onnx converters for :epgk:`xgboost`\nand *lightgbm*.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy as np\nimport pandas as pd\nfrom onnxruntime import InferenceSession\nfrom sklearn.preprocessing import MinMaxScaler\nfrom skl2onnx.proto import onnx_proto\nfrom skl2onnx.common.data_types import (\n FloatTensorType, Int64TensorType, guess_numpy_type)\nfrom skl2onnx import to_onnx, update_registered_converter, get_model_alias\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxIdentity, OnnxMul, OnnxLess, OnnxConcat, OnnxCast, OnnxAdd,\n OnnxClip)\nfrom skl2onnx.algebra.onnx_operator import OnnxSubEstimator\ntry:\n from pyod.models.iforest import IForest\nexcept (ValueError, ImportError) as e:\n print(\"Unable to import pyod:\", e)\n IForest = None\n\nif IForest is not None:\n data1 = {'First': [500, 500, 400, 100, 200, 300, 100],\n 'Second': ['a', 'b', 'a', 'b', 'a', 'b', 'c']}\n\n df1 = pd.DataFrame(data1, columns=['First', 'Second'])\n dumdf1 = pd.get_dummies(df1)\n scaler = MinMaxScaler()\n scaler.partial_fit(dumdf1)\n sc_data = scaler.transform(dumdf1)\n model1 = IForest(n_estimators=10, bootstrap=True, behaviour='new',\n contamination=0.1, random_state=np.random.RandomState(42),\n verbose=1, n_jobs=-1).fit(sc_data)\n feature_names2 = dumdf1.columns\n\n initial_type = [('float_input',\n FloatTensorType([None, len(feature_names2)]))]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We check that the conversion fails as expected.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "if IForest is not None:\n try:\n to_onnx(model1, initial_types=initial_type)\n except Exception as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom converter\n\nFirst the parser and the shape calculator.\nThe parser defines the number of outputs and their type.\nThe shape calculator defines their dimensions.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def pyod_iforest_parser(scope, model, inputs, custom_parsers=None):\n alias = get_model_alias(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n\n # inputs\n this_operator.inputs.append(inputs[0])\n\n # outputs\n cls_type = inputs[0].type.__class__\n val_y1 = scope.declare_local_variable('label', Int64TensorType())\n val_y2 = scope.declare_local_variable('probability', cls_type())\n this_operator.outputs.append(val_y1)\n this_operator.outputs.append(val_y2)\n\n # end\n return this_operator.outputs\n\n\ndef pyod_iforest_shape_calculator(operator):\n N = operator.inputs[0].get_first_dimension()\n operator.outputs[0].type.shape = [N, 
1]\n operator.outputs[1].type.shape = [N, 2]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then the converter.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def pyod_iforest_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n # We retrieve the unique input.\n X = operator.inputs[0]\n\n # In most case, computation happen in floats.\n # But it might be with double. ONNX is very strict\n # about types, every constant should have the same\n # type as the input.\n dtype = guess_numpy_type(X.type)\n\n detector = op.detector_ # Should be IForest from scikit-learn.\n lab_pred = OnnxSubEstimator(detector, X, op_version=opv)\n scores = OnnxIdentity(lab_pred[1], op_version=opv)\n\n # labels\n threshold = op.threshold_\n above = OnnxLess(scores, np.array([threshold], dtype=dtype),\n op_version=opv)\n labels = OnnxCast(above, op_version=opv, to=onnx_proto.TensorProto.INT64,\n output_names=out[:1])\n\n # probabilities\n train_scores = op.decision_scores_\n scaler = MinMaxScaler().fit(train_scores.reshape(-1, 1))\n scores_ = OnnxMul(scores, np.array([-1], dtype=dtype),\n op_version=opv)\n print(scaler.min_)\n print(scaler.scale_)\n\n scaled = OnnxMul(scores_, scaler.scale_.astype(dtype), op_version=opv)\n scaled_centered = OnnxAdd(scaled, scaler.min_.astype(dtype),\n op_version=opv)\n clipped = OnnxClip(scaled_centered, np.array([0], dtype=dtype),\n np.array([1], dtype=dtype),\n op_version=opv)\n clipped_ = OnnxAdd(\n OnnxMul(clipped, np.array([-1], dtype=dtype),\n op_version=opv),\n np.array([1], dtype=dtype),\n op_version=opv)\n\n scores_2d = OnnxConcat(clipped_, clipped, axis=1, op_version=opv,\n output_names=out[1:])\n\n labels.add_to(scope, container)\n scores_2d.add_to(scope, container)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally the registration.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "if IForest is not None:\n update_registered_converter(\n IForest, \"PyodIForest\",\n pyod_iforest_shape_calculator,\n pyod_iforest_converter,\n parser=pyod_iforest_parser)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And the conversion.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "if IForest is not None:\n onx = to_onnx(model1, initial_types=initial_type,\n target_opset={'': 14, 'ai.onnx.ml': 2})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Checking discrepencies\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "if IForest is not None:\n data = sc_data.astype(np.float32)\n\n expected_labels = model1.predict(data)\n expected_proba = model1.predict_proba(data)\n\n sess = InferenceSession(onx.SerializeToString())\n res = sess.run(None, {'float_input': data})\n\n onx_labels = res[0]\n onx_proba = res[1]\n\n diff_labels = np.abs(onx_labels.ravel() - expected_labels.ravel()).max()\n diff_proba = np.abs(onx_proba.ravel() - expected_proba.ravel()).max()\n\n print(\"dicrepencies:\", diff_labels, diff_proba)\n\n print(\"ONNX labels\", onx_labels)\n print(\"ONNX probabilities\", onx_proba)" + ] + } + ], + "metadata": { + 
"kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/bcc2f9581603261581d5e3343c465570/plot_gexternal_lightgbm.py b/_downloads/bcc2f9581603261581d5e3343c465570/plot_gexternal_lightgbm.py index da0dddfc1..f0d6c6546 100644 --- a/_downloads/bcc2f9581603261581d5e3343c465570/plot_gexternal_lightgbm.py +++ b/_downloads/bcc2f9581603261581d5e3343c465570/plot_gexternal_lightgbm.py @@ -16,9 +16,6 @@ the whole pipeline as long as it knows the converter associated to a *LGBMClassifier*. Let's see how to do it. -.. contents:: - :local: - Train a LightGBM classifier +++++++++++++++++++++++++++ """ diff --git a/_downloads/c307eabd3f7d56c26f38e82d8747879c/plot_jcustom_syntax.py b/_downloads/c307eabd3f7d56c26f38e82d8747879c/plot_jcustom_syntax.py index 9c73bf3be..f509507c5 100644 --- a/_downloads/c307eabd3f7d56c26f38e82d8747879c/plot_jcustom_syntax.py +++ b/_downloads/c307eabd3f7d56c26f38e82d8747879c/plot_jcustom_syntax.py @@ -18,10 +18,6 @@ This one demonstrates the second way which is usually the one used in other converter library. It is more verbose. -.. contents:: - :local: - - Custom model ++++++++++++ diff --git a/_downloads/c3cd2ce44ef67e387417023e85d3859f/plot_gbegin_transfer_learning.ipynb b/_downloads/c3cd2ce44ef67e387417023e85d3859f/plot_gbegin_transfer_learning.ipynb index 2adfbcc22..c8447d3a0 100644 --- a/_downloads/c3cd2ce44ef67e387417023e85d3859f/plot_gbegin_transfer_learning.ipynb +++ b/_downloads/c3cd2ce44ef67e387417023e85d3859f/plot_gbegin_transfer_learning.ipynb @@ -1,277 +1,277 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Transfer Learning with ONNX\n\n.. index:: transfer learning, deep learning\n\nTransfer learning is common with deep learning.\nA deep learning model is used as preprocessing before\nthe output is sent to a final classifier or regressor.\nIt is not quite easy in this case to mix framework,\n:epkg:`scikit-learn` with :epkg:`pytorch`\n(or :epkg:`skorch`), the Keras API for Tensorflow,\n`tf.keras.wrappers.scikit_learn\n`_. Every combination\nrequires work. ONNX reduces the number of platforms to\nsupport. 
Once the model is converted into ONNX,\nit can be inserted in any :epkg:`scikit-learn` pipeline.\n\n## Retrieve and load a model\n\nWe download one model from the :epkg:`ONNX Zoo` but the model\ncould be trained and produced by another converter library.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import sys\nfrom io import BytesIO\nimport onnx\nfrom mlprodict.sklapi import OnnxTransformer\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import Pipeline\nfrom mlinsights.plotting.gallery import plot_gallery_images\nimport matplotlib.pyplot as plt\nfrom skl2onnx.tutorial.imagenet_classes import class_names\nimport numpy\nfrom PIL import Image\nfrom onnxruntime import InferenceSession\nfrom onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument\nimport os\nimport urllib.request\n\n\ndef download_file(url, name, min_size):\n if not os.path.exists(name):\n print(\"download '%s'\" % url)\n with urllib.request.urlopen(url) as u:\n content = u.read()\n if len(content) < min_size:\n raise RuntimeError(\n \"Unable to download '{}' due to\\n{}\".format(\n url, content))\n print(\"downloaded %d bytes.\" % len(content))\n with open(name, \"wb\") as f:\n f.write(content)\n else:\n print(\"'%s' already downloaded\" % name)\n\n\nmodel_name = \"squeezenet1.1-7.onnx\"\nurl_name = (\"https://github.com/onnx/models/raw/main/vision/\"\n \"classification/squeezenet/model\")\nurl_name += \"/\" + model_name\ntry:\n download_file(url_name, model_name, 100000)\nexcept RuntimeError as e:\n print(e)\n sys.exit(1)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Loading the ONNX file and use it on one image.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = InferenceSession(model_name)\n\nfor inp in sess.get_inputs():\n print(inp)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The model expects a series of images of size\n`[3, 224, 224]`.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Classifying an image\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "url = (\"https://upload.wikimedia.org/wikipedia/commons/d/d2/\"\n \"East_Coker_elm%2C_2.jpg\")\nimg = \"East_Coker_elm.jpg\"\ndownload_file(url, img, 100000)\n\nim0 = Image.open(img)\nim = im0.resize((224, 224))\n# im.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Image to numpy and predection.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def im2array(im):\n X = numpy.asarray(im)\n X = X.transpose(2, 0, 1)\n X = X.reshape(1, 3, 224, 224)\n return X\n\n\nX = im2array(im)\nout = sess.run(None, {'data': X.astype(numpy.float32)})\nout = out[0]\n\nprint(out[0, :5])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Interpretation\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "res = list(sorted((r, class_names[i]) for i, r in enumerate(out[0])))\nprint(res[-5:])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Classifying more images\n\nThe initial image is rotated,\nthe answer is 
changing.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "angles = [a * 2. for a in range(-6, 6)]\nimgs = [(angle, im0.rotate(angle).resize((224, 224)))\n for angle in angles]\n\n\ndef classify(imgs):\n labels = []\n for angle, img in imgs:\n X = im2array(img)\n probs = sess.run(None, {'data': X.astype(numpy.float32)})[0]\n pl = list(sorted(\n ((r, class_names[i]) for i, r in enumerate(probs[0])),\n reverse=True))\n labels.append((angle, pl))\n return labels\n\n\nclimgs = classify(imgs)\nfor angle, res in climgs:\n print(\"angle={} - {}\".format(angle, res[:5]))\n\n\nplot_gallery_images([img[1] for img in imgs],\n [img[1][0][1][:15] for img in climgs])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Transfer learning in a pipeline\n\nThe proposed transfer learning consists\nusing a PCA to projet the probabilities\non a graph.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "with open(model_name, 'rb') as f:\n model_bytes = f.read()\n\npipe = Pipeline(steps=[\n ('deep', OnnxTransformer(\n model_bytes, runtime='onnxruntime1', change_batch_size=0)),\n ('pca', PCA(2))\n])\n\nX_train = numpy.vstack(\n [im2array(img) for _, img in imgs]).astype(numpy.float32)\npipe.fit(X_train)\n\nproj = pipe.transform(X_train)\nprint(proj)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Graph for the PCA\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "fig, ax = plt.subplots(1, 1, figsize=(5, 5))\nax.plot(proj[:, 0], proj[:, 1], 'o')\nax.set_title(\"Projection of classification probabilities\")\ntext = [\"%1.0f-%s\" % (el[0], el[1][0][1]) for el in climgs]\nfor label, x, y in zip(text, proj[:, 0], proj[:, 1]):\n ax.annotate(\n label, xy=(x, y), xytext=(-10, 10), fontsize=8,\n textcoords='offset points', ha='right', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Remove one layer at the end\n\nThe last is often removed before the model is\ninserted in a pipeline. 
Let's see how to do that.\nFirst, we need the list of output for every node.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = onnx.load(BytesIO(model_bytes))\noutputs = []\nfor node in model_onnx.graph.node:\n print(node.name, node.output)\n outputs.extend(node.output)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We select one of the last one.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "selected = outputs[-3]\nprint(\"selected\", selected)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And we tell *OnnxTransformer* to use that\nspecific one and to flatten the output\nas the dimension is not a matrix.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pipe2 = Pipeline(steps=[\n ('deep', OnnxTransformer(\n model_bytes, runtime='onnxruntime1', change_batch_size=0,\n output_name=selected, reshape=True)),\n ('pca', PCA(2))\n])\n\ntry:\n pipe2.fit(X_train)\nexcept InvalidArgument as e:\n print(\"Unable to fit due to\", e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We check that it is different.\nThe following values are the shape of the\nPCA components. The number of column is the number\nof dimensions of the outputs of the transfered\nneural network.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(pipe.steps[1][1].components_.shape,\n pipe2.steps[1][1].components_.shape)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Graph again.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "proj2 = pipe2.transform(X_train)\n\nfig, ax = plt.subplots(1, 1, figsize=(5, 5))\nax.plot(proj2[:, 0], proj2[:, 1], 'o')\nax.set_title(\"Second projection of classification probabilities\")\ntext = [\"%1.0f-%s\" % (el[0], el[1][0][1]) for el in climgs]\nfor label, x, y in zip(text, proj2[:, 0], proj2[:, 1]):\n ax.annotate(\n label, xy=(x, y), xytext=(-10, 10), fontsize=8,\n textcoords='offset points', ha='right', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Transfer Learning with ONNX\n\n.. 
index:: transfer learning, deep learning\n\nTransfer learning is common with deep learning.\nA deep learning model is used as preprocessing before\nthe output is sent to a final classifier or regressor.\nIt is not quite easy in this case to mix framework,\n:epkg:`scikit-learn` with :epkg:`pytorch`\n(or :epkg:`skorch`), the Keras API for Tensorflow,\n[tf.keras.wrappers.scikit_learn](https://www.tensorflow.org/api_docs/python/tf/\nkeras/wrappers/scikit_learn). Every combination\nrequires work. ONNX reduces the number of platforms to\nsupport. Once the model is converted into ONNX,\nit can be inserted in any :epkg:`scikit-learn` pipeline.\n\n## Retrieve and load a model\n\nWe download one model from the :epkg:`ONNX Zoo` but the model\ncould be trained and produced by another converter library.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import sys\nfrom io import BytesIO\nimport onnx\nfrom mlprodict.sklapi import OnnxTransformer\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import Pipeline\nfrom mlinsights.plotting.gallery import plot_gallery_images\nimport matplotlib.pyplot as plt\nfrom skl2onnx.tutorial.imagenet_classes import class_names\nimport numpy\nfrom PIL import Image\nfrom onnxruntime import InferenceSession\nfrom onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument\nimport os\nimport urllib.request\n\n\ndef download_file(url, name, min_size):\n if not os.path.exists(name):\n print(\"download '%s'\" % url)\n with urllib.request.urlopen(url) as u:\n content = u.read()\n if len(content) < min_size:\n raise RuntimeError(\n \"Unable to download '{}' due to\\n{}\".format(\n url, content))\n print(\"downloaded %d bytes.\" % len(content))\n with open(name, \"wb\") as f:\n f.write(content)\n else:\n print(\"'%s' already downloaded\" % name)\n\n\nmodel_name = \"squeezenet1.1-7.onnx\"\nurl_name = (\"https://github.com/onnx/models/raw/main/vision/\"\n \"classification/squeezenet/model\")\nurl_name += \"/\" + model_name\ntry:\n download_file(url_name, model_name, 100000)\nexcept RuntimeError as e:\n print(e)\n sys.exit(1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Loading the ONNX file and use it on one image.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = InferenceSession(model_name)\n\nfor inp in sess.get_inputs():\n print(inp)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The model expects a series of images of size\n`[3, 224, 224]`.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Classifying an image\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "url = (\"https://upload.wikimedia.org/wikipedia/commons/d/d2/\"\n \"East_Coker_elm%2C_2.jpg\")\nimg = \"East_Coker_elm.jpg\"\ndownload_file(url, img, 100000)\n\nim0 = Image.open(img)\nim = im0.resize((224, 224))\n# im.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Image to numpy and predection.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def im2array(im):\n X = numpy.asarray(im)\n X = X.transpose(2, 0, 1)\n X = X.reshape(1, 3, 224, 224)\n return X\n\n\nX = im2array(im)\nout = sess.run(None, 
{'data': X.astype(numpy.float32)})\nout = out[0]\n\nprint(out[0, :5])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Interpretation\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "res = list(sorted((r, class_names[i]) for i, r in enumerate(out[0])))\nprint(res[-5:])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Classifying more images\n\nThe initial image is rotated,\nthe answer is changing.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "angles = [a * 2. for a in range(-6, 6)]\nimgs = [(angle, im0.rotate(angle).resize((224, 224)))\n for angle in angles]\n\n\ndef classify(imgs):\n labels = []\n for angle, img in imgs:\n X = im2array(img)\n probs = sess.run(None, {'data': X.astype(numpy.float32)})[0]\n pl = list(sorted(\n ((r, class_names[i]) for i, r in enumerate(probs[0])),\n reverse=True))\n labels.append((angle, pl))\n return labels\n\n\nclimgs = classify(imgs)\nfor angle, res in climgs:\n print(\"angle={} - {}\".format(angle, res[:5]))\n\n\nplot_gallery_images([img[1] for img in imgs],\n [img[1][0][1][:15] for img in climgs])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Transfer learning in a pipeline\n\nThe proposed transfer learning consists\nusing a PCA to projet the probabilities\non a graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "with open(model_name, 'rb') as f:\n model_bytes = f.read()\n\npipe = Pipeline(steps=[\n ('deep', OnnxTransformer(\n model_bytes, runtime='onnxruntime1', change_batch_size=0)),\n ('pca', PCA(2))\n])\n\nX_train = numpy.vstack(\n [im2array(img) for _, img in imgs]).astype(numpy.float32)\npipe.fit(X_train)\n\nproj = pipe.transform(X_train)\nprint(proj)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Graph for the PCA\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "fig, ax = plt.subplots(1, 1, figsize=(5, 5))\nax.plot(proj[:, 0], proj[:, 1], 'o')\nax.set_title(\"Projection of classification probabilities\")\ntext = [\"%1.0f-%s\" % (el[0], el[1][0][1]) for el in climgs]\nfor label, x, y in zip(text, proj[:, 0], proj[:, 1]):\n ax.annotate(\n label, xy=(x, y), xytext=(-10, 10), fontsize=8,\n textcoords='offset points', ha='right', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Remove one layer at the end\n\nThe last is often removed before the model is\ninserted in a pipeline. 
Let's see how to do that.\nFirst, we need the list of output for every node.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = onnx.load(BytesIO(model_bytes))\noutputs = []\nfor node in model_onnx.graph.node:\n print(node.name, node.output)\n outputs.extend(node.output)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We select one of the last one.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "selected = outputs[-3]\nprint(\"selected\", selected)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And we tell *OnnxTransformer* to use that\nspecific one and to flatten the output\nas the dimension is not a matrix.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pipe2 = Pipeline(steps=[\n ('deep', OnnxTransformer(\n model_bytes, runtime='onnxruntime1', change_batch_size=0,\n output_name=selected, reshape=True)),\n ('pca', PCA(2))\n])\n\ntry:\n pipe2.fit(X_train)\nexcept InvalidArgument as e:\n print(\"Unable to fit due to\", e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We check that it is different.\nThe following values are the shape of the\nPCA components. The number of column is the number\nof dimensions of the outputs of the transfered\nneural network.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(pipe.steps[1][1].components_.shape,\n pipe2.steps[1][1].components_.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Graph again.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "proj2 = pipe2.transform(X_train)\n\nfig, ax = plt.subplots(1, 1, figsize=(5, 5))\nax.plot(proj2[:, 0], proj2[:, 1], 'o')\nax.set_title(\"Second projection of classification probabilities\")\ntext = [\"%1.0f-%s\" % (el[0], el[1][0][1]) for el in climgs]\nfor label, x, y in zip(text, proj2[:, 0], proj2[:, 1]):\n ax.annotate(\n label, xy=(x, y), xytext=(-10, 10), fontsize=8,\n textcoords='offset points', ha='right', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/c60e190dd1235e389247f0b47c599940/plot_fbegin_investigate.ipynb b/_downloads/c60e190dd1235e389247f0b47c599940/plot_fbegin_investigate.ipynb index 8d239f9e5..8b22b165d 100644 --- a/_downloads/c60e190dd1235e389247f0b47c599940/plot_fbegin_investigate.ipynb +++ b/_downloads/c60e190dd1235e389247f0b47c599940/plot_fbegin_investigate.ipynb @@ -1,180 +1,180 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - 
"%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Intermediate results and investigation\n\n.. index:: investigate, intermediate results\n\nThere are many reasons why a user wants more than using\nthe converted model into ONNX. Intermediate results may be\nneeded, the output of every node in the graph. The ONNX\nmay need to be altered to remove some nodes.\nTransfer learning is usually removing the last layers of\na deep neural network. Another reaason is debugging.\nIt often happens that the runtime fails to compute the predictions\ndue to a shape mismatch. Then it is useful the get the shape\nof every intermediate result. This example looks into two\nways of doing it.\n\n## Look into pipeline steps\n\nThe first way is a tricky one: it overloads\nmethods *transform*, *predict* and *predict_proba*\nto keep a copy of inputs and outputs. It then goes\nthrough every step of the pipeline. If the pipeline\nhas *n* steps, it converts the pipeline with step 1,\nthen the pipeline with steps 1, 2, then 1, 2, 3...\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom mlprodict.onnxrt import OnnxInference\nimport numpy\nfrom onnxruntime import InferenceSession\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets import load_iris\nfrom skl2onnx import to_onnx\nfrom skl2onnx.helpers import collect_intermediate_steps\nfrom skl2onnx.common.data_types import FloatTensorType" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The pipeline.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "data = load_iris()\nX = data.data\n\npipe = Pipeline(steps=[\n ('std', StandardScaler()),\n ('km', KMeans(3))\n])\npipe.fit(X)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The function goes through every step,\noverloads the methods *transform* and\nreturns an ONNX graph for every step.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "steps = collect_intermediate_steps(\n pipe, \"pipeline\",\n [(\"X\", FloatTensorType([None, X.shape[1]]))])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We call method transform to population the\ncache the overloaded methods *transform* keeps.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pipe.transform(X)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We compute every step and compare\nONNX and scikit-learn outputs.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "for step in steps:\n print('----------------------------')\n print(step['model'])\n onnx_step = step['onnx_step']\n sess = InferenceSession(onnx_step.SerializeToString())\n onnx_outputs = sess.run(None, {'X': X.astype(numpy.float32)})\n onnx_output = onnx_outputs[-1]\n skl_outputs = step['model']._debug.outputs['transform']\n\n # comparison\n diff = numpy.abs(skl_outputs.ravel() - onnx_output.ravel()).max()\n print(\"difference\", diff)\n\n# 
That was the first way: dynamically overwrite\n# every method transform or predict in a scikit-learn\n# pipeline to capture the input and output of every step,\n# compare them to the output produced by truncated ONNX\n# graphs built from the first one.\n#" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Python runtime to look into every node\n\nThe python runtime may be useful to easily look\ninto every node of the ONNX graph.\nThis option can be used to check when the computation\nfails due to nan values or a dimension mismatch.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx = to_onnx(pipe, X[:1].astype(numpy.float32))\n\noinf = OnnxInference(onx)\noinf.run({'X': X[:2].astype(numpy.float32)},\n verbose=1, fLOG=print)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And to get a sense of the intermediate results.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf.run({'X': X[:2].astype(numpy.float32)},\n verbose=3, fLOG=print)\n\n# This way is usually better if you need to investigate\n# issues within the code of the runtime for an operator.\n#" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Final graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Intermediate results and investigation\n\n.. index:: investigate, intermediate results\n\nThere are many reasons why a user wants more than using\nthe converted model into ONNX. Intermediate results may be\nneeded, the output of every node in the graph. The ONNX\nmay need to be altered to remove some nodes.\nTransfer learning is usually removing the last layers of\na deep neural network. Another reaason is debugging.\nIt often happens that the runtime fails to compute the predictions\ndue to a shape mismatch. Then it is useful the get the shape\nof every intermediate result. This example looks into two\nways of doing it.\n\n## Look into pipeline steps\n\nThe first way is a tricky one: it overloads\nmethods *transform*, *predict* and *predict_proba*\nto keep a copy of inputs and outputs. It then goes\nthrough every step of the pipeline. 
If the pipeline\nhas *n* steps, it converts the pipeline with step 1,\nthen the pipeline with steps 1, 2, then 1, 2, 3...\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom mlprodict.onnxrt import OnnxInference\nimport numpy\nfrom onnxruntime import InferenceSession\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets import load_iris\nfrom skl2onnx import to_onnx\nfrom skl2onnx.helpers import collect_intermediate_steps\nfrom skl2onnx.common.data_types import FloatTensorType" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The pipeline.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "data = load_iris()\nX = data.data\n\npipe = Pipeline(steps=[\n ('std', StandardScaler()),\n ('km', KMeans(3, n_init=3))\n])\npipe.fit(X)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The function goes through every step,\noverloads the methods *transform* and\nreturns an ONNX graph for every step.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "steps = collect_intermediate_steps(\n pipe, \"pipeline\",\n [(\"X\", FloatTensorType([None, X.shape[1]]))],\n target_opset=17)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We call method transform to population the\ncache the overloaded methods *transform* keeps.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pipe.transform(X)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We compute every step and compare\nONNX and scikit-learn outputs.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "for step in steps:\n print('----------------------------')\n print(step['model'])\n onnx_step = step['onnx_step']\n sess = InferenceSession(onnx_step.SerializeToString(),\n providers=[\"CPUExecutionProvider\"])\n onnx_outputs = sess.run(None, {'X': X.astype(numpy.float32)})\n onnx_output = onnx_outputs[-1]\n skl_outputs = step['model']._debug.outputs['transform']\n\n # comparison\n diff = numpy.abs(skl_outputs.ravel() - onnx_output.ravel()).max()\n print(\"difference\", diff)\n\n# That was the first way: dynamically overwrite\n# every method transform or predict in a scikit-learn\n# pipeline to capture the input and output of every step,\n# compare them to the output produced by truncated ONNX\n# graphs built from the first one.\n#" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Python runtime to look into every node\n\nThe python runtime may be useful to easily look\ninto every node of the ONNX graph.\nThis option can be used to check when the computation\nfails due to nan values or a dimension mismatch.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(pipe, X[:1].astype(numpy.float32),\n target_opset=17)\n\noinf = OnnxInference(onx)\noinf.run({'X': X[:2].astype(numpy.float32)},\n verbose=1, fLOG=print)" + ] 
+ }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And to get a sense of the intermediate results.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf.run({'X': X[:2].astype(numpy.float32)},\n verbose=3, fLOG=print)\n\n# This way is usually better if you need to investigate\n# issues within the code of the runtime for an operator.\n#" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/cbf86eeb19babe7c8838aab5cabb6052/plot_pipeline_lightgbm.ipynb b/_downloads/cbf86eeb19babe7c8838aab5cabb6052/plot_pipeline_lightgbm.ipynb index f8d4f2565..c5831a52b 100644 --- a/_downloads/cbf86eeb19babe7c8838aab5cabb6052/plot_pipeline_lightgbm.ipynb +++ b/_downloads/cbf86eeb19babe7c8838aab5cabb6052/plot_pipeline_lightgbm.ipynb @@ -1,176 +1,176 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Convert a pipeline with a LightGbm model\n\n.. index:: LightGbm\n\n*sklearn-onnx* only converts *scikit-learn* models into *ONNX*\nbut many libraries implement *scikit-learn* API so that their models\ncan be included in a *scikit-learn* pipeline. This example considers\na pipeline including a *LightGbm* model. *sklearn-onnx* can convert\nthe whole pipeline as long as it knows the converter associated to\na *LGBMClassifier*. 
Let's see how to do it.\n\n## Train a LightGBM classifier\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import lightgbm\nimport onnxmltools\nimport skl2onnx\nimport onnx\nimport sklearn\nimport matplotlib.pyplot as plt\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nfrom onnxruntime.capi.onnxruntime_pybind11_state import Fail as OrtFail\nfrom skl2onnx import convert_sklearn, update_registered_converter\nfrom skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes # noqa\nfrom onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm # noqa\nimport onnxmltools.convert.common.data_types\nfrom skl2onnx.common.data_types import FloatTensorType\nimport numpy\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom lightgbm import LGBMClassifier\n\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\n\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()\n\npipe = Pipeline([('scaler', StandardScaler()),\n ('lgbm', LGBMClassifier(n_estimators=3))])\npipe.fit(X, y)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Register the converter for LGBMClassifier\n\nThe converter is implemented in *onnxmltools*:\n`onnxmltools...LightGbm.py\n`_.\nand the shape calculator:\n`onnxmltools...Classifier.py\n`_.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then we import the converter and shape calculator.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's register the new converter.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(\n LGBMClassifier, 'LightGbmLGBMClassifier',\n calculate_linear_classifier_output_shapes, convert_lightgbm,\n options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Convert again\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = convert_sklearn(\n pipe, 'pipeline_lightgbm',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\n\n# And save.\nwith open(\"pipeline_lightgbm.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Compare the predictions\n\nPredictions with LightGbm.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"predict\", pipe.predict(X[:5]))\nprint(\"predict_proba\", pipe.predict_proba(X[:1]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Predictions with onnxruntime.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n sess = rt.InferenceSession(\"pipeline_lightgbm.onnx\")\nexcept OrtFail as e:\n print(e)\n print(\"The converter requires onnxmltools>=1.7.0\")\n sess = None\n\nif sess is not None:\n pred_onx = sess.run(None, {\"input\": X[:5].astype(numpy.float32)})\n 
print(\"predict\", pred_onx[0])\n print(\"predict_proba\", pred_onx[1][:1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Display the ONNX graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline.dot')\n\nimage = plt.imread(\"pipeline.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)\nprint(\"onnxmltools: \", onnxmltools.__version__)\nprint(\"lightgbm: \", lightgbm.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Convert a pipeline with a LightGbm model\n\n.. index:: LightGbm\n\n*sklearn-onnx* only converts *scikit-learn* models into *ONNX*\nbut many libraries implement *scikit-learn* API so that their models\ncan be included in a *scikit-learn* pipeline. This example considers\na pipeline including a *LightGbm* model. *sklearn-onnx* can convert\nthe whole pipeline as long as it knows the converter associated to\na *LGBMClassifier*. 
Let's see how to do it.\n\n## Train a LightGBM classifier\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import lightgbm\nimport onnxmltools\nimport skl2onnx\nimport onnx\nimport sklearn\nimport matplotlib.pyplot as plt\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nfrom onnxruntime.capi.onnxruntime_pybind11_state import Fail as OrtFail\nfrom skl2onnx import convert_sklearn, update_registered_converter\nfrom skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes # noqa\nfrom onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm # noqa\nimport onnxmltools.convert.common.data_types\nfrom skl2onnx.common.data_types import FloatTensorType\nimport numpy\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom lightgbm import LGBMClassifier\n\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\n\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()\n\npipe = Pipeline([('scaler', StandardScaler()),\n ('lgbm', LGBMClassifier(n_estimators=3))])\npipe.fit(X, y)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Register the converter for LGBMClassifier\n\nThe converter is implemented in *onnxmltools*:\n[onnxmltools...LightGbm.py](https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/\nlightgbm/operator_converters/LightGbm.py).\nand the shape calculator:\n[onnxmltools...Classifier.py](https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/\nlightgbm/shape_calculators/Classifier.py).\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then we import the converter and shape calculator.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's register the new converter.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(\n LGBMClassifier, 'LightGbmLGBMClassifier',\n calculate_linear_classifier_output_shapes, convert_lightgbm,\n options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert again\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = convert_sklearn(\n pipe, 'pipeline_lightgbm',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\n\n# And save.\nwith open(\"pipeline_lightgbm.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Compare the predictions\n\nPredictions with LightGbm.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"predict\", pipe.predict(X[:5]))\nprint(\"predict_proba\", pipe.predict_proba(X[:1]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Predictions with onnxruntime.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n sess = 
rt.InferenceSession(\"pipeline_lightgbm.onnx\")\nexcept OrtFail as e:\n print(e)\n print(\"The converter requires onnxmltools>=1.7.0\")\n sess = None\n\nif sess is not None:\n pred_onx = sess.run(None, {\"input\": X[:5].astype(numpy.float32)})\n print(\"predict\", pred_onx[0])\n print(\"predict_proba\", pred_onx[1][:1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Display the ONNX graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline.dot')\n\nimage = plt.imread(\"pipeline.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)\nprint(\"onnxmltools: \", onnxmltools.__version__)\nprint(\"lightgbm: \", lightgbm.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/cc5814881934f1b06740f8e81a4ff224/plot_custom_parser_alternative.ipynb b/_downloads/cc5814881934f1b06740f8e81a4ff224/plot_custom_parser_alternative.ipynb index b21fa9e97..d797c6a77 100644 --- a/_downloads/cc5814881934f1b06740f8e81a4ff224/plot_custom_parser_alternative.ipynb +++ b/_downloads/cc5814881934f1b06740f8e81a4ff224/plot_custom_parser_alternative.ipynb @@ -1,270 +1,270 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# When a custom model is neither a classifier nor a regressor (alternative)\n\n
<div class=\"alert alert-info\"><h4>Note</h4><p>This example rewrites `l-custom-parser` by using\n the syntax proposed in example `l-onnx-operators`\n to write the custom converter, shape calculator and parser.</p></div>
\n\n*scikit-learn*'s API specifies that a regressor produces one\noutputs and a classifier produces two\noutputs, predicted labels and probabilities. The goal here is\nto add a third result which tells if the probability is\nabove a given threshold. That's implemented in method\n*validate*.\n\n## Iris and scoring\n\nA new class is created, it trains any classifier and implements\nthe method *validate* mentioned above.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import inspect\nimport numpy as np\nimport skl2onnx\nimport onnx\nimport sklearn\nfrom sklearn.base import ClassifierMixin, BaseEstimator, clone\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom skl2onnx import update_registered_converter\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nfrom skl2onnx import to_onnx, get_model_alias\nfrom skl2onnx.proto import onnx_proto\nfrom skl2onnx.common.data_types import FloatTensorType, Int64TensorType\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxGreater, OnnxCast, OnnxReduceMax, OnnxIdentity\n)\nfrom skl2onnx.algebra.onnx_operator import OnnxSubEstimator\nimport matplotlib.pyplot as plt\n\n\nclass ValidatorClassifier(BaseEstimator, ClassifierMixin):\n\n def __init__(self, estimator=None, threshold=0.75):\n ClassifierMixin.__init__(self)\n BaseEstimator.__init__(self)\n if estimator is None:\n estimator = LogisticRegression(solver='liblinear')\n self.estimator = estimator\n self.threshold = threshold\n\n def fit(self, X, y, sample_weight=None):\n sig = inspect.signature(self.estimator.fit)\n if 'sample_weight' in sig.parameters:\n self.estimator_ = clone(self.estimator).fit(\n X, y, sample_weight=sample_weight)\n else:\n self.estimator_ = clone(self.estimator).fit(X, y)\n return self\n\n def predict(self, X):\n return self.estimator_.predict(X)\n\n def predict_proba(self, X):\n return self.estimator_.predict_proba(X)\n\n def validate(self, X):\n pred = self.predict_proba(X)\n mx = pred.max(axis=1)\n return (mx >= self.threshold) * 1\n\n\ndata = load_iris()\nX, y = data.data, data.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\nmodel = ValidatorClassifier()\nmodel.fit(X_train, y_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's now measure the indicator which tells\nif the probability of a prediction is above\na threshold.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(model.validate(X_test))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion to ONNX\n\nThe conversion fails for a new model because\nthe library does not know any converter associated\nto this new model.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n to_onnx(model, X_train[:1].astype(np.float32),\n target_opset=12)\nexcept RuntimeError as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Custom converter\n\nWe reuse some pieces of code from `l-custom-model`.\nThe shape calculator defines the shape of every output\nof the converted model.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - 
"collapsed": false - }, - "outputs": [], - "source": [ - "def validator_classifier_shape_calculator(operator):\n\n input0 = operator.inputs[0] # first input in ONNX graph\n outputs = operator.outputs # outputs in ONNX graph\n op = operator.raw_operator # scikit-learn model (mmust be fitted)\n if len(outputs) != 3:\n raise RuntimeError(\"3 outputs expected not {}.\".format(len(outputs)))\n\n N = input0.type.shape[0] # number of observations\n C = op.estimator_.classes_.shape[0] # dimension of outputs\n\n outputs[0].type = Int64TensorType([N]) # label\n outputs[1].type = FloatTensorType([N, C]) # probabilities\n outputs[2].type = Int64TensorType([C]) # validation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then the converter.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def validator_classifier_converter(scope, operator, container):\n input0 = operator.inputs[0] # first input in ONNX graph\n outputs = operator.outputs # outputs in ONNX graph\n op = operator.raw_operator # scikit-learn model (mmust be fitted)\n opv = container.target_opset\n\n # The model calls another one. The class `OnnxSubEstimator`\n # calls the converter for this operator.\n model = op.estimator_\n onnx_op = OnnxSubEstimator(model, input0, op_version=opv,\n options={'zipmap': False})\n\n rmax = OnnxReduceMax(onnx_op[1], axes=[1], keepdims=0, op_version=opv)\n great = OnnxGreater(rmax, np.array([op.threshold], dtype=np.float32),\n op_version=opv)\n valid = OnnxCast(great, to=onnx_proto.TensorProto.INT64,\n op_version=opv)\n\n r1 = OnnxIdentity(onnx_op[0], output_names=[outputs[0].full_name],\n op_version=opv)\n r2 = OnnxIdentity(onnx_op[1], output_names=[outputs[1].full_name],\n op_version=opv)\n r3 = OnnxIdentity(valid, output_names=[outputs[2].full_name],\n op_version=opv)\n\n r1.add_to(scope, container)\n r2.add_to(scope, container)\n r3.add_to(scope, container)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then the registration.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(ValidatorClassifier, 'CustomValidatorClassifier',\n validator_classifier_shape_calculator,\n validator_classifier_converter)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And conversion...\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n to_onnx(model, X_test[:1].astype(np.float32),\n target_opset=12)\nexcept RuntimeError as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It fails because the library expected the model\nto behave like a classifier which produces two\noutputs. 
We need to add a custom parser to\ntell the library this model produces three outputs.\n\n## Custom parser\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def validator_classifier_parser(scope, model, inputs, custom_parsers=None):\n alias = get_model_alias(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n\n # inputs\n this_operator.inputs.append(inputs[0])\n\n # outputs\n val_label = scope.declare_local_variable('val_label', Int64TensorType())\n val_prob = scope.declare_local_variable('val_prob', FloatTensorType())\n val_val = scope.declare_local_variable('val_val', Int64TensorType())\n this_operator.outputs.append(val_label)\n this_operator.outputs.append(val_prob)\n this_operator.outputs.append(val_val)\n\n # ends\n return this_operator.outputs" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Registration.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(ValidatorClassifier, 'CustomValidatorClassifier',\n validator_classifier_shape_calculator,\n validator_classifier_converter,\n parser=validator_classifier_parser)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And conversion again.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = to_onnx(model, X_test[:1].astype(np.float32),\n target_opset=12)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Final test\n\nWe need now to check the results are the same with ONNX.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X32 = X_test[:5].astype(np.float32)\n\nsess = rt.InferenceSession(model_onnx.SerializeToString())\nresults = sess.run(None, {'X': X32})\n\nprint(\"--labels--\")\nprint(\"sklearn\", model.predict(X32))\nprint(\"onnx\", results[0])\nprint(\"--probabilities--\")\nprint(\"sklearn\", model.predict_proba(X32))\nprint(\"onnx\", results[1])\nprint(\"--validation--\")\nprint(\"sklearn\", model.validate(X32))\nprint(\"onnx\", results[2])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It looks good.\n\n## Display the ONNX graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"validator_classifier.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng validator_classifier.dot')\n\nimage = plt.imread(\"validator_classifier.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", np.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": 
{ - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# When a custom model is neither a classifier nor a regressor (alternative)\n\n
<div class=\"alert alert-info\"><h4>Note</h4><p>This example rewrites `l-custom-parser` by using\n the syntax proposed in example `l-onnx-operators`\n to write the custom converter, shape calculator and parser.</p></div>
\n\n*scikit-learn*'s API specifies that a regressor produces one\noutputs and a classifier produces two\noutputs, predicted labels and probabilities. The goal here is\nto add a third result which tells if the probability is\nabove a given threshold. That's implemented in method\n*validate*.\n\n## Iris and scoring\n\nA new class is created, it trains any classifier and implements\nthe method *validate* mentioned above.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import inspect\nimport numpy as np\nimport skl2onnx\nimport onnx\nimport sklearn\nfrom sklearn.base import ClassifierMixin, BaseEstimator, clone\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom skl2onnx import update_registered_converter\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nfrom skl2onnx import to_onnx, get_model_alias\nfrom skl2onnx.proto import onnx_proto\nfrom skl2onnx.common.data_types import FloatTensorType, Int64TensorType\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxGreater, OnnxCast, OnnxReduceMaxApi18, OnnxIdentity\n)\nfrom skl2onnx.algebra.onnx_operator import OnnxSubEstimator\nimport matplotlib.pyplot as plt\n\n\nclass ValidatorClassifier(BaseEstimator, ClassifierMixin):\n\n def __init__(self, estimator=None, threshold=0.75):\n ClassifierMixin.__init__(self)\n BaseEstimator.__init__(self)\n if estimator is None:\n estimator = LogisticRegression(solver='liblinear')\n self.estimator = estimator\n self.threshold = threshold\n\n def fit(self, X, y, sample_weight=None):\n sig = inspect.signature(self.estimator.fit)\n if 'sample_weight' in sig.parameters:\n self.estimator_ = clone(self.estimator).fit(\n X, y, sample_weight=sample_weight)\n else:\n self.estimator_ = clone(self.estimator).fit(X, y)\n return self\n\n def predict(self, X):\n return self.estimator_.predict(X)\n\n def predict_proba(self, X):\n return self.estimator_.predict_proba(X)\n\n def validate(self, X):\n pred = self.predict_proba(X)\n mx = pred.max(axis=1)\n return (mx >= self.threshold) * 1\n\n\ndata = load_iris()\nX, y = data.data, data.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\nmodel = ValidatorClassifier()\nmodel.fit(X_train, y_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's now measure the indicator which tells\nif the probability of a prediction is above\na threshold.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(model.validate(X_test))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion to ONNX\n\nThe conversion fails for a new model because\nthe library does not know any converter associated\nto this new model.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n to_onnx(model, X_train[:1].astype(np.float32),\n target_opset=12)\nexcept RuntimeError as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom converter\n\nWe reuse some pieces of code from `l-custom-model`.\nThe shape calculator defines the shape of every output\nof the converted model.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + 
"collapsed": false + }, + "outputs": [], + "source": [ + "def validator_classifier_shape_calculator(operator):\n\n input0 = operator.inputs[0] # first input in ONNX graph\n outputs = operator.outputs # outputs in ONNX graph\n op = operator.raw_operator # scikit-learn model (mmust be fitted)\n if len(outputs) != 3:\n raise RuntimeError(\"3 outputs expected not {}.\".format(len(outputs)))\n\n N = input0.type.shape[0] # number of observations\n C = op.estimator_.classes_.shape[0] # dimension of outputs\n\n outputs[0].type = Int64TensorType([N]) # label\n outputs[1].type = FloatTensorType([N, C]) # probabilities\n outputs[2].type = Int64TensorType([C]) # validation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then the converter.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def validator_classifier_converter(scope, operator, container):\n input0 = operator.inputs[0] # first input in ONNX graph\n outputs = operator.outputs # outputs in ONNX graph\n op = operator.raw_operator # scikit-learn model (mmust be fitted)\n opv = container.target_opset\n\n # The model calls another one. The class `OnnxSubEstimator`\n # calls the converter for this operator.\n model = op.estimator_\n onnx_op = OnnxSubEstimator(model, input0, op_version=opv,\n options={'zipmap': False})\n\n rmax = OnnxReduceMaxApi18(onnx_op[1], axes=[1], keepdims=0, op_version=opv)\n great = OnnxGreater(rmax, np.array([op.threshold], dtype=np.float32),\n op_version=opv)\n valid = OnnxCast(great, to=onnx_proto.TensorProto.INT64,\n op_version=opv)\n\n r1 = OnnxIdentity(onnx_op[0], output_names=[outputs[0].full_name],\n op_version=opv)\n r2 = OnnxIdentity(onnx_op[1], output_names=[outputs[1].full_name],\n op_version=opv)\n r3 = OnnxIdentity(valid, output_names=[outputs[2].full_name],\n op_version=opv)\n\n r1.add_to(scope, container)\n r2.add_to(scope, container)\n r3.add_to(scope, container)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then the registration.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(ValidatorClassifier, 'CustomValidatorClassifier',\n validator_classifier_shape_calculator,\n validator_classifier_converter)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And conversion...\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n to_onnx(model, X_test[:1].astype(np.float32),\n target_opset=12)\nexcept RuntimeError as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It fails because the library expected the model\nto behave like a classifier which produces two\noutputs. 
We need to add a custom parser to\ntell the library this model produces three outputs.\n\n## Custom parser\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def validator_classifier_parser(scope, model, inputs, custom_parsers=None):\n alias = get_model_alias(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n\n # inputs\n this_operator.inputs.append(inputs[0])\n\n # outputs\n val_label = scope.declare_local_variable('val_label', Int64TensorType())\n val_prob = scope.declare_local_variable('val_prob', FloatTensorType())\n val_val = scope.declare_local_variable('val_val', Int64TensorType())\n this_operator.outputs.append(val_label)\n this_operator.outputs.append(val_prob)\n this_operator.outputs.append(val_val)\n\n # ends\n return this_operator.outputs" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Registration.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(ValidatorClassifier, 'CustomValidatorClassifier',\n validator_classifier_shape_calculator,\n validator_classifier_converter,\n parser=validator_classifier_parser)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And conversion again.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = to_onnx(model, X_test[:1].astype(np.float32),\n target_opset=12)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final test\n\nWe need now to check the results are the same with ONNX.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X32 = X_test[:5].astype(np.float32)\n\nsess = rt.InferenceSession(model_onnx.SerializeToString())\nresults = sess.run(None, {'X': X32})\n\nprint(\"--labels--\")\nprint(\"sklearn\", model.predict(X32))\nprint(\"onnx\", results[0])\nprint(\"--probabilities--\")\nprint(\"sklearn\", model.predict_proba(X32))\nprint(\"onnx\", results[1])\nprint(\"--validation--\")\nprint(\"sklearn\", model.validate(X32))\nprint(\"onnx\", results[2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It looks good.\n\n## Display the ONNX graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"validator_classifier.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng validator_classifier.dot')\n\nimage = plt.imread(\"validator_classifier.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", np.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": 
{ + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/cd1d1015cbc40506764127851ff7784d/plot_convert_model.ipynb b/_downloads/cd1d1015cbc40506764127851ff7784d/plot_convert_model.ipynb index 4c8317648..dee879a73 100644 --- a/_downloads/cd1d1015cbc40506764127851ff7784d/plot_convert_model.ipynb +++ b/_downloads/cd1d1015cbc40506764127851ff7784d/plot_convert_model.ipynb @@ -1,126 +1,126 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Train, convert and predict a model\n\nTrain and deploy a model usually involves the\nthree following steps:\n\n* train a pipeline with *scikit-learn*,\n* convert it into *ONNX* with *sklearn-onnx*,\n* predict with *onnxruntime*.\n\n## Train a model\n\nA very basic example using random forest and\nthe iris dataset.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import skl2onnx\nimport onnx\nimport sklearn\nfrom sklearn.linear_model import LogisticRegression\nimport numpy\nimport onnxruntime as rt\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nclr = RandomForestClassifier()\nclr.fit(X_train, y_train)\nprint(clr)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Convert a model into ONNX\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "initial_type = [('float_input', FloatTensorType([None, 4]))]\nonx = convert_sklearn(clr, initial_types=initial_type,\n target_opset=12)\n\nwith open(\"rf_iris.onnx\", \"wb\") as f:\n f.write(onx.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Compute the prediction with ONNX Runtime\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = rt.InferenceSession(\"rf_iris.onnx\")\ninput_name = sess.get_inputs()[0].name\nlabel_name = sess.get_outputs()[0].name\npred_onx = sess.run(\n [label_name], {input_name: X_test.astype(numpy.float32)})[0]\nprint(pred_onx)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Full example with a logistic regression\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "clr = LogisticRegression()\nclr.fit(X_train, y_train)\ninitial_type = [('float_input', FloatTensorType([None, X_train.shape[1]]))]\nonx = convert_sklearn(clr, initial_types=initial_type,\n target_opset=12)\nwith open(\"logreg_iris.onnx\", \"wb\") as f:\n 
f.write(onx.SerializeToString())\n\nsess = rt.InferenceSession(\"logreg_iris.onnx\")\ninput_name = sess.get_inputs()[0].name\nlabel_name = sess.get_outputs()[0].name\npred_onx = sess.run([label_name],\n {input_name: X_test.astype(numpy.float32)})[0]\nprint(pred_onx)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Train, convert and predict a model\n\nTrain and deploy a model usually involves the\nthree following steps:\n\n* train a pipeline with *scikit-learn*,\n* convert it into *ONNX* with *sklearn-onnx*,\n* predict with *onnxruntime*.\n\n## Train a model\n\nA very basic example using random forest and\nthe iris dataset.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import skl2onnx\nimport onnx\nimport sklearn\nfrom sklearn.linear_model import LogisticRegression\nimport numpy\nimport onnxruntime as rt\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nclr = RandomForestClassifier()\nclr.fit(X_train, y_train)\nprint(clr)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert a model into ONNX\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "initial_type = [('float_input', FloatTensorType([None, 4]))]\nonx = convert_sklearn(clr, initial_types=initial_type,\n target_opset=12)\n\nwith open(\"rf_iris.onnx\", \"wb\") as f:\n f.write(onx.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Compute the prediction with ONNX Runtime\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = rt.InferenceSession(\"rf_iris.onnx\", providers=[\"CPUExecutionProvider\"])\ninput_name = sess.get_inputs()[0].name\nlabel_name = sess.get_outputs()[0].name\npred_onx = sess.run(\n [label_name], {input_name: X_test.astype(numpy.float32)})[0]\nprint(pred_onx)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Full example with a logistic regression\n\n" + ] + }, + { + 
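The updated notebook passes the execution providers explicitly, which recent onnxruntime releases expect when several providers are available. A minimal sketch of the pattern, assuming the rf_iris.onnx file written above:

import numpy
import onnxruntime as rt

# Explicitly select the CPU execution provider.
sess = rt.InferenceSession("rf_iris.onnx", providers=["CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
x = numpy.array([[5.1, 3.5, 1.4, 0.2]], dtype=numpy.float32)  # illustrative iris-like row
print(sess.run([label_name], {input_name: x})[0])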
"cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "clr = LogisticRegression()\nclr.fit(X_train, y_train)\ninitial_type = [('float_input', FloatTensorType([None, X_train.shape[1]]))]\nonx = convert_sklearn(clr, initial_types=initial_type,\n target_opset=12)\nwith open(\"logreg_iris.onnx\", \"wb\") as f:\n f.write(onx.SerializeToString())\n\nsess = rt.InferenceSession(\"logreg_iris.onnx\")\ninput_name = sess.get_inputs()[0].name\nlabel_name = sess.get_outputs()[0].name\npred_onx = sess.run([label_name],\n {input_name: X_test.astype(numpy.float32)})[0]\nprint(pred_onx)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/ceb5576281875218f95acece4a8b7d14/plot_custom_model.ipynb b/_downloads/ceb5576281875218f95acece4a8b7d14/plot_custom_model.ipynb index d5609cea4..4a4b04156 100644 --- a/_downloads/ceb5576281875218f95acece4a8b7d14/plot_custom_model.ipynb +++ b/_downloads/ceb5576281875218f95acece4a8b7d14/plot_custom_model.ipynb @@ -1,270 +1,270 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Write your own converter for your own model\n\nIt might happen that you implemented your own model\nand there is obviously no existing converter for this\nnew model. That does not mean the conversion of a pipeline\nwhich includes it would not work. Let's see how to do it.\n\n`t-SNE `_ is an interesting\ntransform which can only be used to study data as there is no\nway to reproduce the result once it was fitted. 
That's why\nthe class `TSNE `_\ndoes not have any method *transform*, only\n`fit_transform `_.\nThis example proposes a way to train a machine learned model\nwhich approximates the outputs of a *t-SNE* transformer.\n\n## Implementation of the new transform\n\nThe first section is about the implementation.\nThe code is quite generic but basically follows this\nprocess to fit the model with *X* and *y*:\n\n* t-SNE, $(X, y) \\rightarrow X_2 \\in \\mathbb{R}^2$\n* k nearest neightbours, $fit(X, X_2)$,\n which produces function $f(X) \\rightarrow X_3$\n* final normalization, simple scaling $X_3 \\rightarrow X_4$\n\nAnd to predict on a test set:\n\n* k nearest neightbours, $f(X') \\rightarrow X'_3$\n* final normalization, simple scaling $X'_3 \\rightarrow X'_4$\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import inspect\nimport os\nimport numpy\nimport onnx\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nfrom matplotlib import offsetbox\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nfrom sklearn.base import BaseEstimator, TransformerMixin, clone\nfrom sklearn.manifold import TSNE\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom skl2onnx import update_registered_converter\nimport skl2onnx\nfrom skl2onnx import convert_sklearn, get_model_alias\nfrom skl2onnx.common._registration import get_shape_calculator\nfrom skl2onnx.common.data_types import FloatTensorType\n\n\nclass PredictableTSNE(BaseEstimator, TransformerMixin):\n\n def __init__(self, transformer=None, estimator=None,\n normalize=True, keep_tsne_outputs=False, **kwargs):\n \"\"\"\n :param transformer: `TSNE` by default\n :param estimator: `MLPRegressor` by default\n :param normalize: normalizes the outputs, centers and normalizes\n the output of the *t-SNE* and applies that same\n normalization to he prediction of the estimator\n :param keep_tsne_output: if True, keep raw outputs of\n *TSNE* is stored in member *tsne_outputs_*\n :param kwargs: sent to :meth:`set_params `, see its\n documentation to understand how to specify parameters\n \"\"\"\n TransformerMixin.__init__(self)\n BaseEstimator.__init__(self)\n if estimator is None:\n estimator = KNeighborsRegressor()\n if transformer is None:\n transformer = TSNE()\n self.estimator = estimator\n self.transformer = transformer\n self.keep_tsne_outputs = keep_tsne_outputs\n if not hasattr(transformer, \"fit_transform\"):\n raise AttributeError(\n \"Transformer {} does not have a 'fit_transform' \"\n \"method.\".format(type(transformer)))\n if not hasattr(estimator, \"predict\"):\n raise AttributeError(\n \"Estimator {} does not have a 'predict' method.\".format(\n type(estimator)))\n self.normalize = normalize\n if kwargs:\n self.set_params(**kwargs)\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"\n Runs a *k-means* on each class\n then trains a classifier on the\n extended set of features.\n Parameters\n ----------\n X : numpy array or sparse matrix of shape [n_samples,n_features]\n Training data\n y : numpy array of shape [n_samples, n_targets]\n Target values. 
Will be cast to X's dtype if necessary\n sample_weight : numpy array of shape [n_samples]\n Individual weights for each sample\n Returns\n -------\n self : returns an instance of self.\n Attributes\n ----------\n transformer_: trained transformeer\n estimator_: trained regressor\n tsne_outputs_: t-SNE outputs if *keep_tsne_outputs* is True\n mean_: average of the *t-SNE* output on each dimension\n inv_std_: inverse of the standard deviation of the *t-SNE*\n output on each dimension\n loss_: loss (*mean_squared_error*)\n between the predictions and the outputs of t-SNE\n \"\"\"\n params = dict(y=y, sample_weight=sample_weight)\n\n self.transformer_ = clone(self.transformer)\n\n sig = inspect.signature(self.transformer.fit_transform)\n pars = {}\n for p in ['sample_weight', 'y']:\n if p in sig.parameters and p in params:\n pars[p] = params[p]\n target = self.transformer_.fit_transform(X, **pars)\n\n sig = inspect.signature(self.estimator.fit)\n if 'sample_weight' in sig.parameters:\n self.estimator_ = clone(self.estimator).fit(\n X, target, sample_weight=sample_weight)\n else:\n self.estimator_ = clone(self.estimator).fit(X, target)\n mean = target.mean(axis=0)\n var = target.std(axis=0)\n self.mean_ = mean\n self.inv_std_ = 1. / var\n exp = (target - mean) * self.inv_std_\n got = (self.estimator_.predict(X) - mean) * self.inv_std_\n self.loss_ = mean_squared_error(exp, got)\n if self.keep_tsne_outputs:\n self.tsne_outputs_ = exp if self.normalize else target\n return self\n\n def transform(self, X):\n \"\"\"\n Runs the predictions.\n Parameters\n ----------\n X : numpy array or sparse matrix of shape [n_samples,n_features]\n Training data\n Returns\n -------\n tranformed *X*\n \"\"\"\n pred = self.estimator_.predict(X)\n if self.normalize:\n pred -= self.mean_\n pred *= self.inv_std_\n return pred\n\n def get_params(self, deep=True):\n \"\"\"\n Returns the parameters for all the embedded objects.\n \"\"\"\n res = {}\n for k, v in self.transformer.get_params().items():\n res[\"t_\" + k] = v\n for k, v in self.estimator.get_params().items():\n res[\"e_\" + k] = v\n return res\n\n def set_params(self, **values):\n \"\"\"\n Sets the parameters before training.\n Every parameter prefixed by ``'e_'`` is an estimator\n parameter, every parameter prefixed by\n ``t_`` is for a transformer parameter.\n \"\"\"\n pt, pe, pn = {}, {}, {}\n for k, v in values.items():\n if k.startswith('e_'):\n pe[k[2:]] = v\n elif k.startswith('t_'):\n pt[k[2:]] = v\n elif k.startswith('n_'):\n pn[k[2:]] = v\n else:\n raise ValueError(\"Unexpected parameter name '{0}'.\".format(k))\n self.transformer.set_params(**pt)\n self.estimator.set_params(**pe)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Experimentation on MNIST\n\nLet's fit t-SNE...\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "digits = datasets.load_digits(n_class=6)\nXd = digits.data\nyd = digits.target\nimgs = digits.images\nn_samples, n_features = Xd.shape\nn_samples, n_features\n\nX_train, X_test, y_train, y_test, imgs_train, imgs_test = train_test_split(\n Xd, yd, imgs)\n\ntsne = TSNE(n_components=2, init='pca', random_state=0)\n\n\ndef plot_embedding(Xp, y, imgs, title=None, figsize=(12, 4)):\n x_min, x_max = numpy.min(Xp, 0), numpy.max(Xp, 0)\n X = (Xp - x_min) / (x_max - x_min)\n\n fig, ax = plt.subplots(1, 2, figsize=figsize)\n for i in range(X.shape[0]):\n ax[0].text(X[i, 0], X[i, 1], str(y[i]),\n color=plt.cm.Set1(y[i] / 
10.),\n fontdict={'weight': 'bold', 'size': 9})\n\n if hasattr(offsetbox, 'AnnotationBbox'):\n # only print thumbnails with matplotlib > 1.0\n shown_images = numpy.array([[1., 1.]]) # just something big\n for i in range(X.shape[0]):\n dist = numpy.sum((X[i] - shown_images) ** 2, 1)\n if numpy.min(dist) < 4e-3:\n # don't show points that are too close\n continue\n shown_images = numpy.r_[shown_images, [X[i]]]\n imagebox = offsetbox.AnnotationBbox(\n offsetbox.OffsetImage(imgs[i], cmap=plt.cm.gray_r),\n X[i])\n ax[0].add_artist(imagebox)\n ax[0].set_xticks([]), ax[0].set_yticks([])\n ax[1].plot(Xp[:, 0], Xp[:, 1], '.')\n if title is not None:\n ax[0].set_title(title)\n return ax\n\n\nX_train_tsne = tsne.fit_transform(X_train)\nplot_embedding(X_train_tsne, y_train, imgs_train,\n \"t-SNE embedding of the digits\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Repeatable t-SNE\n\nJust to check it is working.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ptsne_knn = PredictableTSNE()\nptsne_knn.fit(X_train, y_train)\n\nX_train_tsne2 = ptsne_knn.transform(X_train)\nplot_embedding(X_train_tsne2, y_train, imgs_train,\n \"Predictable t-SNE of the digits\\n\"\n \"StandardScaler+KNeighborsRegressor\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We check on test set.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X_test_tsne2 = ptsne_knn.transform(X_test)\nplot_embedding(X_test_tsne2, y_test, imgs_test,\n \"Predictable t-SNE of the digits\\n\"\n \"StandardScaler+KNeighborsRegressor\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ONNX - shape_calculator, converter\n\nNow starts the part dedicated to *ONNX*.\n*ONNX* conversion requires two function,\none to calculate the shape of the outputs based\non the inputs, the other one to do the actual\nconversion of the model.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def predictable_tsne_shape_calculator(operator):\n\n input = operator.inputs[0] # inputs in ONNX graph\n # output = operator.outputs[0] # output in ONNX graph\n op = operator.raw_operator # scikit-learn model (mmust be fitted)\n\n N = input.type.shape[0] # number of observations\n C = op.estimator_._y.shape[1] # dimension of outputs\n\n # new output definition\n operator.outputs[0].type = FloatTensorType([N, C])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then the converter model. 
We\nreuse existing converter.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def predictable_tsne_converter(scope, operator, container):\n \"\"\"\n :param scope: name space, where to keep node names, get unused new names\n :param operator: operator to converter, same object as sent to\n *predictable_tsne_shape_calculator*\n :param container: contains the ONNX graph\n \"\"\"\n # input = operator.inputs[0] # input in ONNX graph\n output = operator.outputs[0] # output in ONNX graph\n op = operator.raw_operator # scikit-learn model (mmust be fitted)\n\n # First step is the k nearest-neighbours,\n # we reuse existing converter and declare it as local\n # operator.\n model = op.estimator_\n alias = get_model_alias(type(model))\n knn_op = scope.declare_local_operator(alias, model)\n knn_op.inputs = operator.inputs\n\n # We add an intermediate outputs.\n knn_output = scope.declare_local_variable('knn_output', FloatTensorType())\n knn_op.outputs.append(knn_output)\n\n # We adjust the output of the submodel.\n shape_calc = get_shape_calculator(alias)\n shape_calc(knn_op)\n\n # We add the normalizer which needs a unique node name.\n name = scope.get_unique_operator_name('Scaler')\n\n # The parameter follows the specifications of ONNX\n # https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#ai.onnx.ml.Scaler\n attrs = dict(name=name,\n scale=op.inv_std_.ravel().astype(numpy.float32),\n offset=op.mean_.ravel().astype(numpy.float32))\n\n # Let's finally add the scaler which connects the output\n # of the k-nearest neighbours model to output of the whole model\n # declared in ONNX graph\n container.add_node('Scaler', [knn_output.onnx_name], [output.full_name],\n op_domain='ai.onnx.ml', **attrs)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now need to declare the new converter.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(PredictableTSNE, 'CustomPredictableTSNE',\n predictable_tsne_shape_calculator,\n predictable_tsne_converter)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion to ONNX\n\nWe just need to call *convert_sklearn* as any other model\nto convert.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = convert_sklearn(\n ptsne_knn, 'predictable_tsne',\n [('input', FloatTensorType([None, X_test.shape[1]]))],\n target_opset=12)\n\n# And save.\nwith open(\"predictable_tsne.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now compare the prediction.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"ptsne_knn.tranform\\n\", ptsne_knn.transform(X_test[:2]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Predictions with onnxruntime.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = rt.InferenceSession(\"predictable_tsne.onnx\")\n\npred_onx = sess.run(None, {\"input\": X_test[:1].astype(numpy.float32)})\nprint(\"transform\", pred_onx[0])" - ] - }, - { - "cell_type": "markdown", - 
"metadata": {}, - "source": [ - "The converter for the nearest neighbours produces an ONNX graph\nwhich does not allow multiple predictions at a time. Let's call\n*onnxruntime* for the second row.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pred_onx = sess.run(None, {\"input\": X_test[1:2].astype(numpy.float32)})\nprint(\"transform\", pred_onx[0])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Display the ONNX graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline_tsne.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline_tsne.dot')\n\nimage = plt.imread(\"pipeline_tsne.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Write your own converter for your own model\n\nIt might happen that you implemented your own model\nand there is obviously no existing converter for this\nnew model. That does not mean the conversion of a pipeline\nwhich includes it would not work. Let's see how to do it.\n\n[t-SNE](https://lvdmaaten.github.io/tsne/) is an interesting\ntransform which can only be used to study data as there is no\nway to reproduce the result once it was fitted. 
That's why\nthe class [TSNE](https://scikit-learn.org/stable/modules/\ngenerated/sklearn.manifold.TSNE.html)\ndoes not have any method *transform*, only\n[fit_transform](https://scikit-learn.org/stable/modules/\ngenerated/sklearn.manifold.TSNE.html#sklearn.manifold.TSNE.fit_transform).\nThis example proposes a way to train a machine learned model\nwhich approximates the outputs of a *t-SNE* transformer.\n\n## Implementation of the new transform\n\nThe first section is about the implementation.\nThe code is quite generic but basically follows this\nprocess to fit the model with *X* and *y*:\n\n* t-SNE, $(X, y) \\rightarrow X_2 \\in \\mathbb{R}^2$\n* k nearest neightbours, $fit(X, X_2)$,\n which produces function $f(X) \\rightarrow X_3$\n* final normalization, simple scaling $X_3 \\rightarrow X_4$\n\nAnd to predict on a test set:\n\n* k nearest neightbours, $f(X') \\rightarrow X'_3$\n* final normalization, simple scaling $X'_3 \\rightarrow X'_4$\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import inspect\nimport os\nimport numpy\nimport onnx\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nfrom matplotlib import offsetbox\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nfrom sklearn.base import BaseEstimator, TransformerMixin, clone\nfrom sklearn.manifold import TSNE\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom skl2onnx import update_registered_converter\nimport skl2onnx\nfrom skl2onnx import convert_sklearn, get_model_alias\nfrom skl2onnx.common._registration import get_shape_calculator\nfrom skl2onnx.common.data_types import FloatTensorType\n\n\nclass PredictableTSNE(BaseEstimator, TransformerMixin):\n\n def __init__(self, transformer=None, estimator=None,\n normalize=True, keep_tsne_outputs=False, **kwargs):\n \"\"\"\n :param transformer: `TSNE` by default\n :param estimator: `MLPRegressor` by default\n :param normalize: normalizes the outputs, centers and normalizes\n the output of the *t-SNE* and applies that same\n normalization to he prediction of the estimator\n :param keep_tsne_output: if True, keep raw outputs of\n *TSNE* is stored in member *tsne_outputs_*\n :param kwargs: sent to :meth:`set_params `, see its\n documentation to understand how to specify parameters\n \"\"\"\n TransformerMixin.__init__(self)\n BaseEstimator.__init__(self)\n if estimator is None:\n estimator = KNeighborsRegressor()\n if transformer is None:\n transformer = TSNE()\n self.estimator = estimator\n self.transformer = transformer\n self.keep_tsne_outputs = keep_tsne_outputs\n if not hasattr(transformer, \"fit_transform\"):\n raise AttributeError(\n \"Transformer {} does not have a 'fit_transform' \"\n \"method.\".format(type(transformer)))\n if not hasattr(estimator, \"predict\"):\n raise AttributeError(\n \"Estimator {} does not have a 'predict' method.\".format(\n type(estimator)))\n self.normalize = normalize\n if kwargs:\n self.set_params(**kwargs)\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"\n Runs a *k-means* on each class\n then trains a classifier on the\n extended set of features.\n Parameters\n ----------\n X : numpy array or sparse matrix of shape [n_samples,n_features]\n Training data\n y : numpy array of shape [n_samples, n_targets]\n Target values. 
Will be cast to X's dtype if necessary\n sample_weight : numpy array of shape [n_samples]\n Individual weights for each sample\n Returns\n -------\n self : returns an instance of self.\n Attributes\n ----------\n transformer_: trained transformeer\n estimator_: trained regressor\n tsne_outputs_: t-SNE outputs if *keep_tsne_outputs* is True\n mean_: average of the *t-SNE* output on each dimension\n inv_std_: inverse of the standard deviation of the *t-SNE*\n output on each dimension\n loss_: loss (*mean_squared_error*)\n between the predictions and the outputs of t-SNE\n \"\"\"\n params = dict(y=y, sample_weight=sample_weight)\n\n self.transformer_ = clone(self.transformer)\n\n sig = inspect.signature(self.transformer.fit_transform)\n pars = {}\n for p in ['sample_weight', 'y']:\n if p in sig.parameters and p in params:\n pars[p] = params[p]\n target = self.transformer_.fit_transform(X, **pars)\n\n sig = inspect.signature(self.estimator.fit)\n if 'sample_weight' in sig.parameters:\n self.estimator_ = clone(self.estimator).fit(\n X, target, sample_weight=sample_weight)\n else:\n self.estimator_ = clone(self.estimator).fit(X, target)\n mean = target.mean(axis=0)\n var = target.std(axis=0)\n self.mean_ = mean\n self.inv_std_ = 1. / var\n exp = (target - mean) * self.inv_std_\n got = (self.estimator_.predict(X) - mean) * self.inv_std_\n self.loss_ = mean_squared_error(exp, got)\n if self.keep_tsne_outputs:\n self.tsne_outputs_ = exp if self.normalize else target\n return self\n\n def transform(self, X):\n \"\"\"\n Runs the predictions.\n Parameters\n ----------\n X : numpy array or sparse matrix of shape [n_samples,n_features]\n Training data\n Returns\n -------\n tranformed *X*\n \"\"\"\n pred = self.estimator_.predict(X)\n if self.normalize:\n pred -= self.mean_\n pred *= self.inv_std_\n return pred\n\n def get_params(self, deep=True):\n \"\"\"\n Returns the parameters for all the embedded objects.\n \"\"\"\n res = {}\n for k, v in self.transformer.get_params().items():\n res[\"t_\" + k] = v\n for k, v in self.estimator.get_params().items():\n res[\"e_\" + k] = v\n return res\n\n def set_params(self, **values):\n \"\"\"\n Sets the parameters before training.\n Every parameter prefixed by ``'e_'`` is an estimator\n parameter, every parameter prefixed by\n ``t_`` is for a transformer parameter.\n \"\"\"\n pt, pe, pn = {}, {}, {}\n for k, v in values.items():\n if k.startswith('e_'):\n pe[k[2:]] = v\n elif k.startswith('t_'):\n pt[k[2:]] = v\n elif k.startswith('n_'):\n pn[k[2:]] = v\n else:\n raise ValueError(\"Unexpected parameter name '{0}'.\".format(k))\n self.transformer.set_params(**pt)\n self.estimator.set_params(**pe)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Experimentation on MNIST\n\nLet's fit t-SNE...\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "digits = datasets.load_digits(n_class=6)\nXd = digits.data\nyd = digits.target\nimgs = digits.images\nn_samples, n_features = Xd.shape\nn_samples, n_features\n\nX_train, X_test, y_train, y_test, imgs_train, imgs_test = train_test_split(\n Xd, yd, imgs)\n\ntsne = TSNE(n_components=2, init='pca', random_state=0)\n\n\ndef plot_embedding(Xp, y, imgs, title=None, figsize=(12, 4)):\n x_min, x_max = numpy.min(Xp, 0), numpy.max(Xp, 0)\n X = (Xp - x_min) / (x_max - x_min)\n\n fig, ax = plt.subplots(1, 2, figsize=figsize)\n for i in range(X.shape[0]):\n ax[0].text(X[i, 0], X[i, 1], str(y[i]),\n color=plt.cm.Set1(y[i] / 
10.),\n fontdict={'weight': 'bold', 'size': 9})\n\n if hasattr(offsetbox, 'AnnotationBbox'):\n # only print thumbnails with matplotlib > 1.0\n shown_images = numpy.array([[1., 1.]]) # just something big\n for i in range(X.shape[0]):\n dist = numpy.sum((X[i] - shown_images) ** 2, 1)\n if numpy.min(dist) < 4e-3:\n # don't show points that are too close\n continue\n shown_images = numpy.r_[shown_images, [X[i]]]\n imagebox = offsetbox.AnnotationBbox(\n offsetbox.OffsetImage(imgs[i], cmap=plt.cm.gray_r),\n X[i])\n ax[0].add_artist(imagebox)\n ax[0].set_xticks([]), ax[0].set_yticks([])\n ax[1].plot(Xp[:, 0], Xp[:, 1], '.')\n if title is not None:\n ax[0].set_title(title)\n return ax\n\n\nX_train_tsne = tsne.fit_transform(X_train)\nplot_embedding(X_train_tsne, y_train, imgs_train,\n \"t-SNE embedding of the digits\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Repeatable t-SNE\n\nJust to check it is working.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "ptsne_knn = PredictableTSNE()\nptsne_knn.fit(X_train, y_train)\n\nX_train_tsne2 = ptsne_knn.transform(X_train)\nplot_embedding(X_train_tsne2, y_train, imgs_train,\n \"Predictable t-SNE of the digits\\n\"\n \"StandardScaler+KNeighborsRegressor\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We check on test set.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X_test_tsne2 = ptsne_knn.transform(X_test)\nplot_embedding(X_test_tsne2, y_test, imgs_test,\n \"Predictable t-SNE of the digits\\n\"\n \"StandardScaler+KNeighborsRegressor\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ONNX - shape_calculator, converter\n\nNow starts the part dedicated to *ONNX*.\n*ONNX* conversion requires two function,\none to calculate the shape of the outputs based\non the inputs, the other one to do the actual\nconversion of the model.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def predictable_tsne_shape_calculator(operator):\n\n input = operator.inputs[0] # inputs in ONNX graph\n # output = operator.outputs[0] # output in ONNX graph\n op = operator.raw_operator # scikit-learn model (mmust be fitted)\n\n N = input.type.shape[0] # number of observations\n C = op.estimator_._y.shape[1] # dimension of outputs\n\n # new output definition\n operator.outputs[0].type = FloatTensorType([N, C])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then the converter model. 
We\nreuse existing converter.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def predictable_tsne_converter(scope, operator, container):\n \"\"\"\n :param scope: name space, where to keep node names, get unused new names\n :param operator: operator to converter, same object as sent to\n *predictable_tsne_shape_calculator*\n :param container: contains the ONNX graph\n \"\"\"\n # input = operator.inputs[0] # input in ONNX graph\n output = operator.outputs[0] # output in ONNX graph\n op = operator.raw_operator # scikit-learn model (mmust be fitted)\n\n # First step is the k nearest-neighbours,\n # we reuse existing converter and declare it as local\n # operator.\n model = op.estimator_\n alias = get_model_alias(type(model))\n knn_op = scope.declare_local_operator(alias, model)\n knn_op.inputs = operator.inputs\n\n # We add an intermediate outputs.\n knn_output = scope.declare_local_variable('knn_output', FloatTensorType())\n knn_op.outputs.append(knn_output)\n\n # We adjust the output of the submodel.\n shape_calc = get_shape_calculator(alias)\n shape_calc(knn_op)\n\n # We add the normalizer which needs a unique node name.\n name = scope.get_unique_operator_name('Scaler')\n\n # The parameter follows the specifications of ONNX\n # https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#ai.onnx.ml.Scaler\n attrs = dict(name=name,\n scale=op.inv_std_.ravel().astype(numpy.float32),\n offset=op.mean_.ravel().astype(numpy.float32))\n\n # Let's finally add the scaler which connects the output\n # of the k-nearest neighbours model to output of the whole model\n # declared in ONNX graph\n container.add_node('Scaler', [knn_output.onnx_name], [output.full_name],\n op_domain='ai.onnx.ml', **attrs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We now need to declare the new converter.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(PredictableTSNE, 'CustomPredictableTSNE',\n predictable_tsne_shape_calculator,\n predictable_tsne_converter)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion to ONNX\n\nWe just need to call *convert_sklearn* as any other model\nto convert.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = convert_sklearn(\n ptsne_knn, 'predictable_tsne',\n [('input', FloatTensorType([None, X_test.shape[1]]))],\n target_opset=12)\n\n# And save.\nwith open(\"predictable_tsne.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We now compare the prediction.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"ptsne_knn.tranform\\n\", ptsne_knn.transform(X_test[:2]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Predictions with onnxruntime.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = rt.InferenceSession(\"predictable_tsne.onnx\")\n\npred_onx = sess.run(None, {\"input\": X_test[:1].astype(numpy.float32)})\nprint(\"transform\", pred_onx[0])" + ] + }, + { + "cell_type": "markdown", + 
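The Scaler node added by the converter computes (X - offset) * scale element-wise, so mapping offset to op.mean_ and scale to op.inv_std_ reproduces the normalization performed by PredictableTSNE.transform. A small numpy sketch with illustrative values:

import numpy

mean = numpy.array([1.0, -2.0], dtype=numpy.float32)       # stands for op.mean_
inv_std = numpy.array([0.5, 2.0], dtype=numpy.float32)     # stands for op.inv_std_
knn_pred = numpy.array([[3.0, 1.0]], dtype=numpy.float32)  # stands for the k-NN output
# Same formula as the ai.onnx.ml Scaler operator: (X - offset) * scale.
print((knn_pred - mean) * inv_std)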
"metadata": {}, + "source": [ + "The converter for the nearest neighbours produces an ONNX graph\nwhich does not allow multiple predictions at a time. Let's call\n*onnxruntime* for the second row.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pred_onx = sess.run(None, {\"input\": X_test[1:2].astype(numpy.float32)})\nprint(\"transform\", pred_onx[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Display the ONNX graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline_tsne.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline_tsne.dot')\n\nimage = plt.imread(\"pipeline_tsne.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/cfbad5b88c22e01670d66227b20d8b6f/plot_ngrams.py b/_downloads/cfbad5b88c22e01670d66227b20d8b6f/plot_ngrams.py new file mode 100644 index 000000000..b64fb9f4a --- /dev/null +++ b/_downloads/cfbad5b88c22e01670d66227b20d8b6f/plot_ngrams.py @@ -0,0 +1,91 @@ +# SPDX-License-Identifier: Apache-2.0 + +""" +.. _example-ngrams: + +Tricky issue when converting CountVectorizer or TfidfVectorizer +=============================================================== + +This issue is described at `scikit-learn/issues/13733 +`_. +If a CountVectorizer or a TfidfVectorizer produces a token with a space, +skl2onnx cannot know if it a bi-grams or a unigram with a space. + +A simple example impossible to convert +++++++++++++++++++++++++++++++++++++++ +""" + +import pprint +import numpy +from numpy.testing import assert_almost_equal +from onnxruntime import InferenceSession +from sklearn.feature_extraction.text import TfidfVectorizer +from skl2onnx import to_onnx +from skl2onnx.sklapi import TraceableTfidfVectorizer +import skl2onnx.sklapi.register # noqa + +corpus = numpy.array([ + "This is the first document.", + "This document is the second document.", + "Is this the first document?", + "", +]).reshape((4, )) + +pattern = r"\b[a-z ]{1,10}\b" +mod1 = TfidfVectorizer(ngram_range=(1, 2), + token_pattern=pattern) +mod1.fit(corpus) + + +###################################### +# Unigrams and bi-grams are placed into the following container +# which maps it to its column index. 
+ +pprint.pprint(mod1.vocabulary_) + + +#################################### +# Conversion. + +try: + to_onnx(mod1, corpus) +except RuntimeError as e: + print(e) + + +####################################### +# TraceableTfidfVectorizer +# ++++++++++++++++++++++++ +# +# Class :class:`TraceableTfidfVectorizer` is equivalent to +# :class:`sklearn.feature_extraction.text.TfidfVectorizer` +# but stores the unigrams and bi-grams of the vocabulary with tuple +# instead of concatenating every piece into a string. + + +mod2 = TraceableTfidfVectorizer( + ngram_range=(1, 2), token_pattern=pattern) +mod2.fit(corpus) + +pprint.pprint(mod2.vocabulary_) + +####################################### +# Let's check it produces the same results. + +assert_almost_equal(mod1.transform(corpus).todense(), + mod2.transform(corpus).todense()) + +#################################### +# Conversion. Line `import skl2onnx.sklapi.register` +# was added to register the converters associated to these +# new class. By default, only converters for scikit-learn are +# declared. + +onx = to_onnx(mod2, corpus) +sess = InferenceSession(onx.SerializeToString()) +got = sess.run(None, {'X': corpus}) + +################################### +# Let's check if there are discrepancies... + +assert_almost_equal(mod2.transform(corpus).todense(), got[0]) diff --git a/_downloads/d33786f8ad6cfe5d7913326e2138aa91/plot_cast_transformer.py b/_downloads/d33786f8ad6cfe5d7913326e2138aa91/plot_cast_transformer.py index a0ef74642..11449b808 100644 --- a/_downloads/d33786f8ad6cfe5d7913326e2138aa91/plot_cast_transformer.py +++ b/_downloads/d33786f8ad6cfe5d7913326e2138aa91/plot_cast_transformer.py @@ -21,9 +21,6 @@ follows another path in the tree. Let's see how to solve that issue. -.. contents:: - :local: - An example with fails +++++++++++++++++++++ @@ -77,8 +74,10 @@ ################################# # Conversion into ONNX. -onx1 = to_onnx(model1, X_train[:1].astype(np.float32)) -sess1 = InferenceSession(onx1.SerializeToString()) +onx1 = to_onnx(model1, X_train[:1].astype(np.float32), + target_opset=15) +sess1 = InferenceSession(onx1.SerializeToString(), + providers=["CPUExecutionProvider"]) ################################### # And the maximum difference. @@ -136,9 +135,11 @@ def maxdiff(a1, a2): exp2 = model2.predict(Xi_test) onx2 = to_onnx(model2, X_train[:1].astype(np.float32), - options={StandardScaler: {'div': 'div_cast'}}) + options={StandardScaler: {'div': 'div_cast'}}, + target_opset=15) -sess2 = InferenceSession(onx2.SerializeToString()) +sess2 = InferenceSession(onx2.SerializeToString(), + providers=["CPUExecutionProvider"]) got2 = sess2.run(None, {'X': Xi_test})[0] md2 = maxdiff(exp2, got2) diff --git a/_downloads/d436e9922b51a71358604ec00f09e7e4/plot_pipeline.py b/_downloads/d436e9922b51a71358604ec00f09e7e4/plot_pipeline.py index ba17c5d68..7c5475005 100644 --- a/_downloads/d436e9922b51a71358604ec00f09e7e4/plot_pipeline.py +++ b/_downloads/d436e9922b51a71358604ec00f09e7e4/plot_pipeline.py @@ -11,9 +11,6 @@ how to draw a model and to retrieve it in *json* format. -.. contents:: - :local: - Retrieve a model in JSON format +++++++++++++++++++++++++++++++ diff --git a/_downloads/d76acde21d34d0dcb6f54fb2fc4829be/plot_fbegin_investigate.py b/_downloads/d76acde21d34d0dcb6f54fb2fc4829be/plot_fbegin_investigate.py index 83fee144b..dd2aadf5d 100644 --- a/_downloads/d76acde21d34d0dcb6f54fb2fc4829be/plot_fbegin_investigate.py +++ b/_downloads/d76acde21d34d0dcb6f54fb2fc4829be/plot_fbegin_investigate.py @@ -17,9 +17,6 @@ of every intermediate result. 
This example looks into two ways of doing it. -.. contents:: - :local: - Look into pipeline steps ++++++++++++++++++++++++ @@ -50,7 +47,7 @@ pipe = Pipeline(steps=[ ('std', StandardScaler()), - ('km', KMeans(3)) + ('km', KMeans(3, n_init=3)) ]) pipe.fit(X) @@ -60,7 +57,8 @@ # returns an ONNX graph for every step. steps = collect_intermediate_steps( pipe, "pipeline", - [("X", FloatTensorType([None, X.shape[1]]))]) + [("X", FloatTensorType([None, X.shape[1]]))], + target_opset=17) ##################################### # We call method transform to population the @@ -75,7 +73,8 @@ print('----------------------------') print(step['model']) onnx_step = step['onnx_step'] - sess = InferenceSession(onnx_step.SerializeToString()) + sess = InferenceSession(onnx_step.SerializeToString(), + providers=["CPUExecutionProvider"]) onnx_outputs = sess.run(None, {'X': X.astype(numpy.float32)}) onnx_output = onnx_outputs[-1] skl_outputs = step['model']._debug.outputs['transform'] @@ -100,7 +99,8 @@ # fails due to nan values or a dimension mismatch. -onx = to_onnx(pipe, X[:1].astype(numpy.float32)) +onx = to_onnx(pipe, X[:1].astype(numpy.float32), + target_opset=17) oinf = OnnxInference(onx) oinf.run({'X': X[:2].astype(numpy.float32)}, diff --git a/_downloads/da611e022f91cbffa55d5c357770ac5e/plot_errors_onnxruntime.ipynb b/_downloads/da611e022f91cbffa55d5c357770ac5e/plot_errors_onnxruntime.ipynb index 72247ec79..17696c767 100644 --- a/_downloads/da611e022f91cbffa55d5c357770ac5e/plot_errors_onnxruntime.ipynb +++ b/_downloads/da611e022f91cbffa55d5c357770ac5e/plot_errors_onnxruntime.ipynb @@ -1,180 +1,180 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Errors with onnxruntime\n\nMany mistakes might happen with *onnxruntime*.\nThis example looks into several common situations\nin which *onnxruntime* does not return the model\nprediction but raises an exception instead.\nIt starts by loading a model\n(see `l-rf-iris-example`).\nwhich produces a logistic regression\ntrained on *Iris* datasets. 
The model takes\na vector of dimension 2 and returns a class among three.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import skl2onnx\nimport onnx\nimport sklearn\nimport onnxruntime as rt\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\ntry:\n from onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument\nexcept ImportError:\n # onnxruntime <= 0.5\n InvalidArgument = RuntimeError\n\ndata = load_iris()\nclr = LogisticRegression().fit(data.data[:, :2], data.target)\nwith open(\"logreg_iris.onnx\", \"wb\") as f:\n f.write(\n skl2onnx.to_onnx(\n clr, data.data[:, :2].astype(np.float32),\n target_opset=12).SerializeToString())\n\nexample2 = \"logreg_iris.onnx\"\nsess = rt.InferenceSession(example2)\n\ninput_name = sess.get_inputs()[0].name\noutput_name = sess.get_outputs()[0].name" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The first example fails due to *bad types*.\n*onnxruntime* only expects single floats (4 bytes)\nand cannot handle any other kind of floats.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n x = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]],\n dtype=np.float64)\n sess.run([output_name], {input_name: x})\nexcept Exception as e:\n print(\"Unexpected type\")\n print(\"{0}: {1}\".format(type(e), e))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The model fails to return an output if the name\nis misspelled.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)\n sess.run([\"misspelled\"], {input_name: x})\nexcept Exception as e:\n print(\"Misspelled output name\")\n print(\"{0}: {1}\".format(type(e), e))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The output name is optional, it can be replaced by *None*\nand *onnxruntime* will then return all the outputs.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)\nres = sess.run(None, {input_name: x})\nprint(\"All outputs\")\nprint(res)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The same goes if the input name is misspelled.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)\n sess.run([output_name], {\"misspelled\": x})\nexcept Exception as e:\n print(\"Misspelled input name\")\n print(\"{0}: {1}\".format(type(e), e))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "*onnxruntime* does not necessarily fail if the input\ndimension is a multiple of the expected input dimension.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "for x in [\n np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),\n np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32),\n np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32),\n np.array([1.0, 2.0, 3.0], 
dtype=np.float32),\n np.array([[1.0, 2.0, 3.0]], dtype=np.float32)]:\n try:\n r = sess.run([output_name], {input_name: x})\n print(\"Shape={0} and predicted labels={1}\".format(x.shape, r))\n except (RuntimeError, InvalidArgument) as e:\n print(\"Shape={0} and error={1}\".format(x.shape, e))\n\nfor x in [\n np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),\n np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32),\n np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32),\n np.array([1.0, 2.0, 3.0], dtype=np.float32),\n np.array([[1.0, 2.0, 3.0]], dtype=np.float32)]:\n try:\n r = sess.run(None, {input_name: x})\n print(\"Shape={0} and predicted probabilities={1}\".format(\n x.shape, r[1]))\n except (RuntimeError, InvalidArgument) as e:\n print(\"Shape={0} and error={1}\".format(x.shape, e))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It does not fail either if the number of dimension\nis higher than expects but produces a warning.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "for x in [\n np.array([[[1.0, 2.0], [3.0, 4.0]]], dtype=np.float32),\n np.array([[[1.0, 2.0, 3.0]]], dtype=np.float32),\n np.array([[[1.0, 2.0]], [[3.0, 4.0]]], dtype=np.float32)]:\n try:\n r = sess.run([output_name], {input_name: x})\n print(\"Shape={0} and predicted labels={1}\".format(x.shape, r))\n except (RuntimeError, InvalidArgument) as e:\n print(\"Shape={0} and error={1}\".format(x.shape, e))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", np.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Errors with onnxruntime\n\nMany mistakes might happen with *onnxruntime*.\nThis example looks into several common situations\nin which *onnxruntime* does not return the model\nprediction but raises an exception instead.\nIt starts by loading a model\n(see `l-rf-iris-example`).\nwhich produces a logistic regression\ntrained on *Iris* datasets. 
The model takes\na vector of dimension 2 and returns a class among three.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import skl2onnx\nimport onnx\nimport sklearn\nimport onnxruntime as rt\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\ntry:\n from onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument\nexcept ImportError:\n # onnxruntime <= 0.5\n InvalidArgument = RuntimeError\n\ndata = load_iris()\nclr = LogisticRegression().fit(data.data[:, :2], data.target)\nwith open(\"logreg_iris.onnx\", \"wb\") as f:\n f.write(\n skl2onnx.to_onnx(\n clr, data.data[:, :2].astype(np.float32),\n target_opset=12).SerializeToString())\n\nexample2 = \"logreg_iris.onnx\"\nsess = rt.InferenceSession(example2)\n\ninput_name = sess.get_inputs()[0].name\noutput_name = sess.get_outputs()[0].name" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The first example fails due to *bad types*.\n*onnxruntime* only expects single floats (4 bytes)\nand cannot handle any other kind of floats.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n x = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]],\n dtype=np.float64)\n sess.run([output_name], {input_name: x})\nexcept Exception as e:\n print(\"Unexpected type\")\n print(\"{0}: {1}\".format(type(e), e))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The model fails to return an output if the name\nis misspelled.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)\n sess.run([\"misspelled\"], {input_name: x})\nexcept Exception as e:\n print(\"Misspelled output name\")\n print(\"{0}: {1}\".format(type(e), e))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The output name is optional, it can be replaced by *None*\nand *onnxruntime* will then return all the outputs.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)\nres = sess.run(None, {input_name: x})\nprint(\"All outputs\")\nprint(res)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The same goes if the input name is misspelled.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)\n sess.run([output_name], {\"misspelled\": x})\nexcept Exception as e:\n print(\"Misspelled input name\")\n print(\"{0}: {1}\".format(type(e), e))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*onnxruntime* does not necessarily fail if the input\ndimension is a multiple of the expected input dimension.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "for x in [\n np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),\n np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32),\n np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32),\n np.array([1.0, 2.0, 3.0], 
dtype=np.float32),\n np.array([[1.0, 2.0, 3.0]], dtype=np.float32)]:\n try:\n r = sess.run([output_name], {input_name: x})\n print(\"Shape={0} and predicted labels={1}\".format(x.shape, r))\n except (RuntimeError, InvalidArgument) as e:\n print(\"Shape={0} and error={1}\".format(x.shape, e))\n\nfor x in [\n np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),\n np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32),\n np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32),\n np.array([1.0, 2.0, 3.0], dtype=np.float32),\n np.array([[1.0, 2.0, 3.0]], dtype=np.float32)]:\n try:\n r = sess.run(None, {input_name: x})\n print(\"Shape={0} and predicted probabilities={1}\".format(\n x.shape, r[1]))\n except (RuntimeError, InvalidArgument) as e:\n print(\"Shape={0} and error={1}\".format(x.shape, e))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It does not fail either if the number of dimensions\nis higher than expected but produces a warning.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "for x in [\n np.array([[[1.0, 2.0], [3.0, 4.0]]], dtype=np.float32),\n np.array([[[1.0, 2.0, 3.0]]], dtype=np.float32),\n np.array([[[1.0, 2.0]], [[3.0, 4.0]]], dtype=np.float32)]:\n try:\n r = sess.run([output_name], {input_name: x})\n print(\"Shape={0} and predicted labels={1}\".format(x.shape, r))\n except (RuntimeError, InvalidArgument) as e:\n print(\"Shape={0} and error={1}\".format(x.shape, e))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", np.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/dee3d6abcabe2247c6b901ca5b9d137d/plot_mcustom_parser.ipynb b/_downloads/dee3d6abcabe2247c6b901ca5b9d137d/plot_mcustom_parser.ipynb index be70c22e6..103958750 100644 --- a/_downloads/dee3d6abcabe2247c6b901ca5b9d137d/plot_mcustom_parser.ipynb +++ b/_downloads/dee3d6abcabe2247c6b901ca5b9d137d/plot_mcustom_parser.ipynb @@ -1,144 +1,144 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Change the number of outputs by adding a parser\n\n.. index:: parser\n\nBy default, :epkg:`sklearn-onnx` assumes that a classifier\nhas two outputs (label and probabilities), a regressor\nhas one output (prediction), a transform has one output\n(the transformed data). 
What if it is not the case?\nThe following example creates a custom converter\nand a custom parser which defines the number of outputs\nexpected by the converted model.\n\nExample `l-plot-custom-options` shows a converter\nwhich selects two ways to compute the same outputs.\nIn this one, the converter produces both. That would not\nbe a very efficient converter but that's just for the sake\nof using a parser. By default, a transformer only returns\none output but both are needed.\n\n## A new transformer\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom mlprodict.onnxrt import OnnxInference\nimport numpy\nfrom onnxruntime import InferenceSession\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\nfrom skl2onnx import update_registered_converter\nfrom skl2onnx.common.data_types import guess_numpy_type\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxSub, OnnxMatMul, OnnxGemm)\nfrom skl2onnx import to_onnx, get_model_alias\n\n\nclass DecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.mean_`: average\n * `self.coef_`: square root of the coveriance matrix\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n if sample_weights is not None:\n raise NotImplementedError(\n \"sample_weights != None is not implemented.\")\n self.mean_ = numpy.mean(X, axis=0, keepdims=True)\n X = X - self.mean_\n V = X.T @ X / X.shape[0]\n if self.alpha != 0:\n V += numpy.identity(V.shape[0]) * self.alpha\n L, P = numpy.linalg.eig(V)\n Linv = L ** (-0.5)\n diag = numpy.diag(Linv)\n root = P @ diag @ P.transpose()\n self.coef_ = root\n return self\n\n def transform(self, X):\n return (X - self.mean_) @ self.coef_\n\n\ndata = load_iris()\nX = data.data\n\ndec = DecorrelateTransformer()\ndec.fit(X)\npred = dec.transform(X[:5])\nprint(pred)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion into ONNX with two outputs\n\nLet's try to convert it and see what happens.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def decorrelate_transformer_shape_calculator(operator):\n op = operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n input_dim = operator.inputs[0].type.shape[0]\n output_type = input_type([input_dim, op.coef_.shape[1]])\n operator.outputs[0].type = output_type\n\n\ndef decorrelate_transformer_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n X = operator.inputs[0]\n\n dtype = guess_numpy_type(X.type)\n\n Y1 = OnnxMatMul(\n OnnxSub(X, op.mean_.astype(dtype), op_version=opv),\n op.coef_.astype(dtype),\n op_version=opv, output_names=out[:1])\n\n Y2 = OnnxGemm(X, op.coef_.astype(dtype),\n (- op.mean_ @ op.coef_).astype(dtype),\n op_version=opv, alpha=1., beta=1.,\n output_names=out[1:2])\n\n Y1.add_to(scope, container)\n Y2.add_to(scope, container)\n\n\ndef decorrelate_transformer_parser(\n scope, model, inputs, custom_parsers=None):\n alias = get_model_alias(type(model))\n this_operator = 
scope.declare_local_operator(alias, model)\n\n # inputs\n this_operator.inputs.append(inputs[0])\n\n # outputs\n cls_type = inputs[0].type.__class__\n val_y1 = scope.declare_local_variable('nogemm', cls_type())\n val_y2 = scope.declare_local_variable('gemm', cls_type())\n this_operator.outputs.append(val_y1)\n this_operator.outputs.append(val_y2)\n\n # ends\n return this_operator.outputs" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The registration needs to declare the parser as well.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(\n DecorrelateTransformer, \"SklearnDecorrelateTransformer\",\n decorrelate_transformer_shape_calculator,\n decorrelate_transformer_converter,\n parser=decorrelate_transformer_parser)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And conversion.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx = to_onnx(dec, X.astype(numpy.float32),\n target_opset=14)\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\nresults = sess.run(None, {'X': X.astype(numpy.float32)})\ny1 = results[0]\ny2 = results[1]\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, y1))\nprint(diff(exp, y2))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It works. The final looks like the following.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "oinf = OnnxInference(onx, runtime=\"python_compiled\")\nprint(oinf)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Final graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Change the number of outputs by adding a parser\n\n.. index:: parser\n\nBy default, :epkg:`sklearn-onnx` assumes that a classifier\nhas two outputs (label and probabilities), a regressor\nhas one output (prediction), a transform has one output\n(the transformed data). What if it is not the case?\nThe following example creates a custom converter\nand a custom parser which defines the number of outputs\nexpected by the converted model.\n\nExample `l-plot-custom-options` shows a converter\nwhich selects two ways to compute the same outputs.\nIn this one, the converter produces both. 
That would not\nbe a very efficient converter but that's just for the sake\nof using a parser. By default, a transformer only returns\none output but both are needed.\n\n## A new transformer\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from pyquickhelper.helpgen.graphviz_helper import plot_graphviz\nfrom mlprodict.onnxrt import OnnxInference\nimport numpy\nfrom onnxruntime import InferenceSession\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.datasets import load_iris\nfrom skl2onnx import update_registered_converter\nfrom skl2onnx.common.data_types import guess_numpy_type\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxSub, OnnxMatMul, OnnxGemm)\nfrom skl2onnx import to_onnx, get_model_alias\n\n\nclass DecorrelateTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Decorrelates correlated gaussian features.\n\n :param alpha: avoids non inversible matrices\n by adding *alpha* identity matrix\n\n *Attributes*\n\n * `self.mean_`: average\n * `self.coef_`: square root of the coveriance matrix\n \"\"\"\n\n def __init__(self, alpha=0.):\n BaseEstimator.__init__(self)\n TransformerMixin.__init__(self)\n self.alpha = alpha\n\n def fit(self, X, y=None, sample_weights=None):\n if sample_weights is not None:\n raise NotImplementedError(\n \"sample_weights != None is not implemented.\")\n self.mean_ = numpy.mean(X, axis=0, keepdims=True)\n X = X - self.mean_\n V = X.T @ X / X.shape[0]\n if self.alpha != 0:\n V += numpy.identity(V.shape[0]) * self.alpha\n L, P = numpy.linalg.eig(V)\n Linv = L ** (-0.5)\n diag = numpy.diag(Linv)\n root = P @ diag @ P.transpose()\n self.coef_ = root\n return self\n\n def transform(self, X):\n return (X - self.mean_) @ self.coef_\n\n\ndata = load_iris()\nX = data.data\n\ndec = DecorrelateTransformer()\ndec.fit(X)\npred = dec.transform(X[:5])\nprint(pred)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion into ONNX with two outputs\n\nLet's try to convert it and see what happens.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def decorrelate_transformer_shape_calculator(operator):\n op = operator.raw_operator\n input_type = operator.inputs[0].type.__class__\n input_dim = operator.inputs[0].type.shape[0]\n output_type = input_type([input_dim, op.coef_.shape[1]])\n operator.outputs[0].type = output_type\n\n\ndef decorrelate_transformer_converter(scope, operator, container):\n op = operator.raw_operator\n opv = container.target_opset\n out = operator.outputs\n\n X = operator.inputs[0]\n\n dtype = guess_numpy_type(X.type)\n\n Y1 = OnnxMatMul(\n OnnxSub(X, op.mean_.astype(dtype), op_version=opv),\n op.coef_.astype(dtype),\n op_version=opv, output_names=out[:1])\n\n Y2 = OnnxGemm(X, op.coef_.astype(dtype),\n (- op.mean_ @ op.coef_).astype(dtype),\n op_version=opv, alpha=1., beta=1.,\n output_names=out[1:2])\n\n Y1.add_to(scope, container)\n Y2.add_to(scope, container)\n\n\ndef decorrelate_transformer_parser(\n scope, model, inputs, custom_parsers=None):\n alias = get_model_alias(type(model))\n this_operator = scope.declare_local_operator(alias, model)\n\n # inputs\n this_operator.inputs.append(inputs[0])\n\n # outputs\n cls_type = inputs[0].type.__class__\n val_y1 = scope.declare_local_variable('nogemm', cls_type())\n val_y2 = scope.declare_local_variable('gemm', cls_type())\n this_operator.outputs.append(val_y1)\n 
this_operator.outputs.append(val_y2)\n\n # ends\n return this_operator.outputs" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The registration needs to declare the parser as well.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(\n DecorrelateTransformer, \"SklearnDecorrelateTransformer\",\n decorrelate_transformer_shape_calculator,\n decorrelate_transformer_converter,\n parser=decorrelate_transformer_parser)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And conversion.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(dec, X.astype(numpy.float32),\n target_opset=14)\n\nsess = InferenceSession(onx.SerializeToString())\n\nexp = dec.transform(X.astype(numpy.float32))\nresults = sess.run(None, {'X': X.astype(numpy.float32)})\ny1 = results[0]\ny2 = results[1]\n\n\ndef diff(p1, p2):\n p1 = p1.ravel()\n p2 = p2.ravel()\n d = numpy.abs(p2 - p1)\n return d.max(), (d / numpy.abs(p1)).max()\n\n\nprint(diff(exp, y1))\nprint(diff(exp, y2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It works. The final looks like the following.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "oinf = OnnxInference(onx, runtime=\"python_compiled\")\nprint(oinf)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "ax = plot_graphviz(oinf.to_dot())\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/df9658e30a7503e053e8e21c98657d85/plot_dbegin_options.py b/_downloads/df9658e30a7503e053e8e21c98657d85/plot_dbegin_options.py index 212891cbd..7695014da 100644 --- a/_downloads/df9658e30a7503e053e8e21c98657d85/plot_dbegin_options.py +++ b/_downloads/df9658e30a7503e053e8e21c98657d85/plot_dbegin_options.py @@ -14,10 +14,6 @@ users needs two different conversion for the same model? Let's see how this may be done. -.. contents:: - :local: - - Option *zipmap* +++++++++++++++ diff --git a/_downloads/e2d7770cefdde29780778832aedfce25/plot_pipeline_xgboost.ipynb b/_downloads/e2d7770cefdde29780778832aedfce25/plot_pipeline_xgboost.ipynb index 5600512d2..543020af9 100644 --- a/_downloads/e2d7770cefdde29780778832aedfce25/plot_pipeline_xgboost.ipynb +++ b/_downloads/e2d7770cefdde29780778832aedfce25/plot_pipeline_xgboost.ipynb @@ -1,176 +1,176 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Convert a pipeline with a XGBoost model\n\n.. 
index:: XGBoost\n\n*sklearn-onnx* only converts *scikit-learn* models into *ONNX*\nbut many libraries implement *scikit-learn* API so that their models\ncan be included in a *scikit-learn* pipeline. This example considers\na pipeline including a *XGBoost* model. *sklearn-onnx* can convert\nthe whole pipeline as long as it knows the converter associated to\na *XGBClassifier*. Let's see how to do it.\n\n## Train a XGBoost classifier\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import os\nimport numpy\nimport matplotlib.pyplot as plt\nimport onnx\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nimport sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nimport xgboost\nfrom xgboost import XGBClassifier\nimport skl2onnx\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn, update_registered_converter\nfrom skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes # noqa\nimport onnxmltools\nfrom onnxmltools.convert.xgboost.operator_converters.XGBoost import convert_xgboost # noqa\nimport onnxmltools.convert.common.data_types\n\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\n\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()\n\npipe = Pipeline([('scaler', StandardScaler()),\n ('lgbm', XGBClassifier(n_estimators=3))])\npipe.fit(X, y)\n\n# The conversion fails but it is expected.\n\ntry:\n convert_sklearn(pipe, 'pipeline_xgboost',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\nexcept Exception as e:\n print(e)\n\n# The error message tells no converter was found\n# for XGBoost models. By default, *sklearn-onnx*\n# only handles models from *scikit-learn* but it can\n# be extended to every model following *scikit-learn*\n# API as long as the module knows there exists a converter\n# for every model used in a pipeline. That's why\n# we need to register a converter." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Register the converter for XGBClassifier\n\nThe converter is implemented in *onnxmltools*:\n`onnxmltools...XGBoost.py\n`_.\nand the shape calculator:\n`onnxmltools...Classifier.py\n`_.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then we import the converter and shape calculator.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's register the new converter.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "update_registered_converter(\n XGBClassifier, 'XGBoostXGBClassifier',\n calculate_linear_classifier_output_shapes, convert_xgboost,\n options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Convert again\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = convert_sklearn(\n pipe, 'pipeline_xgboost',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\n\n# And save.\nwith open(\"pipeline_xgboost.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Compare the predictions\n\nPredictions with XGBoost.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"predict\", pipe.predict(X[:5]))\nprint(\"predict_proba\", pipe.predict_proba(X[:1]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Predictions with onnxruntime.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = rt.InferenceSession(\"pipeline_xgboost.onnx\")\npred_onx = sess.run(None, {\"input\": X[:5].astype(numpy.float32)})\nprint(\"predict\", pred_onx[0])\nprint(\"predict_proba\", pred_onx[1][:1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Display the ONNX graph\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline.dot')\n\nimage = plt.imread(\"pipeline.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)\nprint(\"onnxmltools: \", onnxmltools.__version__)\nprint(\"xgboost: \", xgboost.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { 
- "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Convert a pipeline with a XGBoost model\n\n.. index:: XGBoost\n\n*sklearn-onnx* only converts *scikit-learn* models into *ONNX*\nbut many libraries implement *scikit-learn* API so that their models\ncan be included in a *scikit-learn* pipeline. This example considers\na pipeline including a *XGBoost* model. *sklearn-onnx* can convert\nthe whole pipeline as long as it knows the converter associated to\na *XGBClassifier*. Let's see how to do it.\n\n## Train a XGBoost classifier\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import os\nimport numpy\nimport matplotlib.pyplot as plt\nimport onnx\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nimport sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nimport xgboost\nfrom xgboost import XGBClassifier\nimport skl2onnx\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn, update_registered_converter\nfrom skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes # noqa\nimport onnxmltools\nfrom onnxmltools.convert.xgboost.operator_converters.XGBoost import convert_xgboost # noqa\nimport onnxmltools.convert.common.data_types\n\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\n\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()\n\npipe = Pipeline([('scaler', StandardScaler()),\n ('lgbm', XGBClassifier(n_estimators=3))])\npipe.fit(X, y)\n\n# The conversion fails but it is expected.\n\ntry:\n convert_sklearn(pipe, 'pipeline_xgboost',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\nexcept Exception as e:\n print(e)\n\n# The error message tells no converter was found\n# for XGBoost models. By default, *sklearn-onnx*\n# only handles models from *scikit-learn* but it can\n# be extended to every model following *scikit-learn*\n# API as long as the module knows there exists a converter\n# for every model used in a pipeline. That's why\n# we need to register a converter." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Register the converter for XGBClassifier\n\nThe converter is implemented in *onnxmltools*:\n[onnxmltools...XGBoost.py](https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/\nxgboost/operator_converters/XGBoost.py).\nand the shape calculator:\n[onnxmltools...Classifier.py](https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/\nxgboost/shape_calculators/Classifier.py).\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then we import the converter and shape calculator.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's register the new converter.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "update_registered_converter(\n XGBClassifier, 'XGBoostXGBClassifier',\n calculate_linear_classifier_output_shapes, convert_xgboost,\n options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert again\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = convert_sklearn(\n pipe, 'pipeline_xgboost',\n [('input', FloatTensorType([None, 2]))],\n target_opset={'': 12, 'ai.onnx.ml': 2})\n\n# And save.\nwith open(\"pipeline_xgboost.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Compare the predictions\n\nPredictions with XGBoost.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"predict\", pipe.predict(X[:5]))\nprint(\"predict_proba\", pipe.predict_proba(X[:1]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Predictions with onnxruntime.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = rt.InferenceSession(\"pipeline_xgboost.onnx\")\npred_onx = sess.run(None, {\"input\": X[:5].astype(numpy.float32)})\nprint(\"predict\", pred_onx[0])\nprint(\"predict_proba\", pred_onx[1][:1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Display the ONNX graph\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n model_onnx.graph, name=model_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\",\n fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline.dot')\n\nimage = plt.imread(\"pipeline.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)\nprint(\"onnxmltools: \", 
onnxmltools.__version__)\nprint(\"xgboost: \", xgboost.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/e601ca27aac94923ef430c7db5fcd9ff/plot_gexternal_lightgbm_reg.ipynb b/_downloads/e601ca27aac94923ef430c7db5fcd9ff/plot_gexternal_lightgbm_reg.ipynb index 1d40be5de..be06b5e57 100644 --- a/_downloads/e601ca27aac94923ef430c7db5fcd9ff/plot_gexternal_lightgbm_reg.ipynb +++ b/_downloads/e601ca27aac94923ef430c7db5fcd9ff/plot_gexternal_lightgbm_reg.ipynb @@ -1,180 +1,180 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Convert a pipeline with a LightGBM regressor\n\n.. index:: LightGBM\n\nThe discrepancies observed when using float and TreeEnsemble operator\n(see `l-example-discrepencies-float-double`)\nexplains why the converter for *LGBMRegressor* may introduce significant\ndiscrepancies even when it is used with float tensors.\n\nLibrary *lightgbm* is implemented with double. A random forest regressor\nwith multiple trees computes its prediction by adding the prediction of\nevery tree. After being converting into ONNX, this summation becomes\n$\\left[\\sum\\right]_{i=1}^F float(T_i(x))$,\nwhere *F* is the number of trees in the forest,\n$T_i(x)$ the output of tree *i* and $\\left[\\sum\\right]$\na float addition. The discrepancy can be expressed as\n$D(x) = |\\left[\\sum\\right]_{i=1}^F float(T_i(x)) -\n\\sum_{i=1}^F T_i(x)|$.\nThis grows with the number of trees in the forest.\n\nTo reduce the impact, an option was added to split the node\n*TreeEnsembleRegressor* into multiple ones and to do a summation\nwith double this time. 
If we assume the node if split into *a* nodes,\nthe discrepancies then become\n$D'(x) = |\\sum_{k=1}^a \\left[\\sum\\right]_{i=1}^{F/a}\nfloat(T_{ak + i}(x)) - \\sum_{i=1}^F T_i(x)|$.\n\n## Train a LGBMRegressor\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import packaging.version as pv\nimport warnings\nimport timeit\nimport numpy\nfrom pandas import DataFrame\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom lightgbm import LGBMRegressor\nfrom onnxruntime import InferenceSession\nfrom skl2onnx import to_onnx, update_registered_converter\nfrom skl2onnx.common.shape_calculator import calculate_linear_regressor_output_shapes # noqa\nfrom onnxmltools import __version__ as oml_version\nfrom onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm # noqa\n\n\nN = 1000\nX = numpy.random.randn(N, 20)\ny = (numpy.random.randn(N) +\n numpy.random.randn(N) * 100 * numpy.random.randint(0, 1, 1000))\n\nreg = LGBMRegressor(n_estimators=1000)\nreg.fit(X, y)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Register the converter for LGBMClassifier\n\nThe converter is implemented in :epkg:`onnxmltools`:\n`onnxmltools...LightGbm.py\n`_.\nand the shape calculator:\n`onnxmltools...Regressor.py\n`_.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def skl2onnx_convert_lightgbm(scope, operator, container):\n options = scope.get_options(operator.raw_operator)\n if 'split' in options:\n if pv.Version(oml_version) < pv.Version('1.9.2'):\n warnings.warn(\n \"Option split was released in version 1.9.2 but %s is \"\n \"installed. It will be ignored.\" % oml_version)\n operator.split = options['split']\n else:\n operator.split = None\n convert_lightgbm(scope, operator, container)\n\n\nupdate_registered_converter(\n LGBMRegressor, 'LightGbmLGBMRegressor',\n calculate_linear_regressor_output_shapes,\n skl2onnx_convert_lightgbm,\n options={'split': None})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Convert\n\nWe convert the same model following the two scenarios, one single\nTreeEnsembleRegressor node, or more. 
*split* parameter is the number of\ntrees per node TreeEnsembleRegressor.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = to_onnx(reg, X[:1].astype(numpy.float32),\n target_opset={'': 14, 'ai.onnx.ml': 2})\nmodel_onnx_split = to_onnx(reg, X[:1].astype(numpy.float32),\n target_opset={'': 14, 'ai.onnx.ml': 2},\n options={'split': 100})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Discrepancies\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = InferenceSession(model_onnx.SerializeToString())\nsess_split = InferenceSession(model_onnx_split.SerializeToString())\n\nX32 = X.astype(numpy.float32)\nexpected = reg.predict(X32)\ngot = sess.run(None, {'X': X32})[0].ravel()\ngot_split = sess_split.run(None, {'X': X32})[0].ravel()\n\ndisp = numpy.abs(got - expected).sum()\ndisp_split = numpy.abs(got_split - expected).sum()\n\nprint(\"sum of discrepancies 1 node\", disp)\nprint(\"sum of discrepancies split node\",\n disp_split, \"ratio:\", disp / disp_split)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The sum of the discrepancies were reduced 4, 5 times.\nThe maximum is much better too.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "disc = numpy.abs(got - expected).max()\ndisc_split = numpy.abs(got_split - expected).max()\n\nprint(\"max discrepancies 1 node\", disc)\nprint(\"max discrepancies split node\", disc_split, \"ratio:\", disc / disc_split)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Processing time\n\nThe processing time is slower but not much.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"processing time no split\",\n timeit.timeit(\n lambda: sess.run(None, {'X': X32})[0], number=150))\nprint(\"processing time split\",\n timeit.timeit(\n lambda: sess_split.run(None, {'X': X32})[0], number=150))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Split influence\n\nLet's see how the sum of the discrepancies moves against\nthe parameter *split*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "res = []\nfor i in tqdm(list(range(20, 170, 20)) + [200, 300, 400, 500]):\n model_onnx_split = to_onnx(reg, X[:1].astype(numpy.float32),\n target_opset={'': 14, 'ai.onnx.ml': 2},\n options={'split': i})\n sess_split = InferenceSession(model_onnx_split.SerializeToString())\n got_split = sess_split.run(None, {'X': X32})[0].ravel()\n disc_split = numpy.abs(got_split - expected).max()\n res.append(dict(split=i, disc=disc_split))\n\ndf = DataFrame(res).set_index('split')\ndf[\"baseline\"] = disc\nprint(df)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Graph.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ax = df.plot(title=\"Sum of discrepancies against split\\n\"\n \"split = number of tree per node\")\n\n# plt.show()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { 
- "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Convert a pipeline with a LightGBM regressor\n\n.. index:: LightGBM\n\nThe discrepancies observed when using float and TreeEnsemble operator\n(see `l-example-discrepencies-float-double`)\nexplains why the converter for *LGBMRegressor* may introduce significant\ndiscrepancies even when it is used with float tensors.\n\nLibrary *lightgbm* is implemented with double. A random forest regressor\nwith multiple trees computes its prediction by adding the prediction of\nevery tree. After being converting into ONNX, this summation becomes\n$\\left[\\sum\\right]_{i=1}^F float(T_i(x))$,\nwhere *F* is the number of trees in the forest,\n$T_i(x)$ the output of tree *i* and $\\left[\\sum\\right]$\na float addition. The discrepancy can be expressed as\n$D(x) = |\\left[\\sum\\right]_{i=1}^F float(T_i(x)) -\n\\sum_{i=1}^F T_i(x)|$.\nThis grows with the number of trees in the forest.\n\nTo reduce the impact, an option was added to split the node\n*TreeEnsembleRegressor* into multiple ones and to do a summation\nwith double this time. If we assume the node if split into *a* nodes,\nthe discrepancies then become\n$D'(x) = |\\sum_{k=1}^a \\left[\\sum\\right]_{i=1}^{F/a}\nfloat(T_{ak + i}(x)) - \\sum_{i=1}^F T_i(x)|$.\n\n## Train a LGBMRegressor\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import packaging.version as pv\nimport warnings\nimport timeit\nimport numpy\nfrom pandas import DataFrame\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom lightgbm import LGBMRegressor\nfrom onnxruntime import InferenceSession\nfrom skl2onnx import to_onnx, update_registered_converter\nfrom skl2onnx.common.shape_calculator import calculate_linear_regressor_output_shapes # noqa\nfrom onnxmltools import __version__ as oml_version\nfrom onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm # noqa\n\n\nN = 1000\nX = numpy.random.randn(N, 20)\ny = (numpy.random.randn(N) +\n numpy.random.randn(N) * 100 * numpy.random.randint(0, 1, 1000))\n\nreg = LGBMRegressor(n_estimators=1000)\nreg.fit(X, y)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Register the converter for LGBMClassifier\n\nThe converter is implemented in :epkg:`onnxmltools`:\n[onnxmltools...LightGbm.py](https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/\nlightgbm/operator_converters/LightGbm.py).\nand the shape calculator:\n[onnxmltools...Regressor.py](https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/\nlightgbm/shape_calculators/Regressor.py).\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def skl2onnx_convert_lightgbm(scope, operator, container):\n options = scope.get_options(operator.raw_operator)\n if 'split' in options:\n if pv.Version(oml_version) < pv.Version('1.9.2'):\n warnings.warn(\n \"Option split was released in version 1.9.2 but %s is \"\n 
\"installed. It will be ignored.\" % oml_version)\n operator.split = options['split']\n else:\n operator.split = None\n convert_lightgbm(scope, operator, container)\n\n\nupdate_registered_converter(\n LGBMRegressor, 'LightGbmLGBMRegressor',\n calculate_linear_regressor_output_shapes,\n skl2onnx_convert_lightgbm,\n options={'split': None})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert\n\nWe convert the same model following the two scenarios, one single\nTreeEnsembleRegressor node, or more. *split* parameter is the number of\ntrees per node TreeEnsembleRegressor.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = to_onnx(reg, X[:1].astype(numpy.float32),\n target_opset={'': 14, 'ai.onnx.ml': 2})\nmodel_onnx_split = to_onnx(reg, X[:1].astype(numpy.float32),\n target_opset={'': 14, 'ai.onnx.ml': 2},\n options={'split': 100})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Discrepancies\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = InferenceSession(model_onnx.SerializeToString())\nsess_split = InferenceSession(model_onnx_split.SerializeToString())\n\nX32 = X.astype(numpy.float32)\nexpected = reg.predict(X32)\ngot = sess.run(None, {'X': X32})[0].ravel()\ngot_split = sess_split.run(None, {'X': X32})[0].ravel()\n\ndisp = numpy.abs(got - expected).sum()\ndisp_split = numpy.abs(got_split - expected).sum()\n\nprint(\"sum of discrepancies 1 node\", disp)\nprint(\"sum of discrepancies split node\",\n disp_split, \"ratio:\", disp / disp_split)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The sum of the discrepancies were reduced 4, 5 times.\nThe maximum is much better too.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "disc = numpy.abs(got - expected).max()\ndisc_split = numpy.abs(got_split - expected).max()\n\nprint(\"max discrepancies 1 node\", disc)\nprint(\"max discrepancies split node\", disc_split, \"ratio:\", disc / disc_split)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Processing time\n\nThe processing time is slower but not much.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"processing time no split\",\n timeit.timeit(\n lambda: sess.run(None, {'X': X32})[0], number=150))\nprint(\"processing time split\",\n timeit.timeit(\n lambda: sess_split.run(None, {'X': X32})[0], number=150))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Split influence\n\nLet's see how the sum of the discrepancies moves against\nthe parameter *split*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "res = []\nfor i in tqdm(list(range(20, 170, 20)) + [200, 300, 400, 500]):\n model_onnx_split = to_onnx(reg, X[:1].astype(numpy.float32),\n target_opset={'': 14, 'ai.onnx.ml': 2},\n options={'split': i})\n sess_split = InferenceSession(model_onnx_split.SerializeToString())\n got_split = sess_split.run(None, {'X': X32})[0].ravel()\n disc_split = numpy.abs(got_split - expected).max()\n res.append(dict(split=i, disc=disc_split))\n\ndf = 
DataFrame(res).set_index('split')\ndf[\"baseline\"] = disc\nprint(df)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "_, ax = plt.subplots(1, 1)\ndf.plot(title=\"Sum of discrepancies against split\\n\"\n \"split = number of tree per node\",\n ax=ax)\n\n# plt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/e752cda73a893a8b470f3aaa438e510e/plot_benchmark_cdist.ipynb b/_downloads/e752cda73a893a8b470f3aaa438e510e/plot_benchmark_cdist.ipynb index 3528edb55..a45c84fac 100644 --- a/_downloads/e752cda73a893a8b470f3aaa438e510e/plot_benchmark_cdist.ipynb +++ b/_downloads/e752cda73a893a8b470f3aaa438e510e/plot_benchmark_cdist.ipynb @@ -1,180 +1,180 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# Compare CDist with scipy\n\nThe following example focuses on one particular operator,\nCDist and compares its execution time between\n*onnxruntime* and *scipy*.\n\n## ONNX Graph with CDist\n\n`cdist `_\nfunction computes pairwise distances.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from pprint import pprint\nfrom timeit import Timer\nimport numpy as np\nfrom scipy.spatial.distance import cdist\nfrom tqdm import tqdm\nfrom pandas import DataFrame\nimport onnx\nimport onnxruntime as rt\nfrom onnxruntime import InferenceSession\nimport skl2onnx\nfrom skl2onnx.algebra.custom_ops import OnnxCDist\nfrom skl2onnx.common.data_types import FloatTensorType\n\nX = np.ones((2, 4), dtype=np.float32)\nY = np.ones((3, 4), dtype=np.float32)\nY *= 2\nprint(cdist(X, Y, metric='euclidean'))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "ONNX\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "op = OnnxCDist('X', 'Y', op_version=12, output_names=['Z'],\n metric='euclidean')\nonx = op.to_onnx({'X': X, 'Y': Y},\n outputs=[('Z', FloatTensorType())])\nprint(onx)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## CDist and onnxruntime\n\nWe compute the output of CDist operator\nwith onnxruntime.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = InferenceSession(onx.SerializeToString())\nres = sess.run(None, {'X': X, 'Y': Y})\nprint(res)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Benchmark\n\nLet's compare onnxruntime and scipy.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def measure_time(name, stmt, context, repeat=100, number=20):\n tim = Timer(stmt, 
globals=context)\n res = np.array(\n tim.repeat(repeat=repeat, number=number))\n res /= number\n mean = np.mean(res)\n dev = np.mean(res ** 2)\n dev = (dev - mean**2) ** 0.5\n return dict(\n average=mean, deviation=dev, min_exec=np.min(res),\n max_exec=np.max(res), repeat=repeat, number=number,\n nrows=context['X'].shape[0], ncols=context['Y'].shape[1],\n name=name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "scipy\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "time_scipy = measure_time(\n \"scipy\", \"cdist(X, Y)\",\n context={'cdist': cdist, 'X': X, 'Y': Y})\npprint(time_scipy)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "onnxruntime\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "time_ort = measure_time(\n \"ort\", \"sess.run(None, {'X': X, 'Y': Y})\",\n context={'sess': sess, 'X': X, 'Y': Y})\npprint(time_ort)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Longer benchmark\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "metrics = []\nfor dim in tqdm([10, 100, 1000, 10000]):\n # We cannot change the number of column otherwise\n # we need to create a new graph.\n X = np.random.randn(dim, 4).astype(np.float32)\n Y = np.random.randn(10, 4).astype(np.float32)\n\n time_scipy = measure_time(\n \"scipy\", \"cdist(X, Y)\",\n context={'cdist': cdist, 'X': X, 'Y': Y})\n time_ort = measure_time(\n \"ort\", \"sess.run(None, {'X': X, 'Y': Y})\",\n context={'sess': sess, 'X': X, 'Y': Y})\n metric = dict(N=dim, scipy=time_scipy['average'],\n ort=time_ort['average'])\n metrics.append(metric)\n\ndf = DataFrame(metrics)\ndf['scipy/ort'] = df['scipy'] / df['ort']\nprint(df)\n\ndf.plot(x='N', y=['scipy/ort'])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", np.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# Compare CDist with scipy\n\nThe following example focuses on one particular operator,\nCDist and compares its execution time between\n*onnxruntime* and *scipy*.\n\n## ONNX Graph with CDist\n\n[cdist](https://docs.scipy.org/doc/scipy/reference/\ngenerated/scipy.spatial.distance.cdist.html)\nfunction computes pairwise distances.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ 
+ "from pprint import pprint\nfrom timeit import Timer\nimport numpy as np\nfrom scipy.spatial.distance import cdist\nfrom tqdm import tqdm\nfrom pandas import DataFrame\nimport onnx\nimport onnxruntime as rt\nfrom onnxruntime import InferenceSession\nimport skl2onnx\nfrom skl2onnx.algebra.custom_ops import OnnxCDist\nfrom skl2onnx.common.data_types import FloatTensorType\n\nX = np.ones((2, 4), dtype=np.float32)\nY = np.ones((3, 4), dtype=np.float32)\nY *= 2\nprint(cdist(X, Y, metric='euclidean'))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "ONNX\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "op = OnnxCDist('X', 'Y', op_version=12, output_names=['Z'],\n metric='euclidean')\nonx = op.to_onnx({'X': X, 'Y': Y},\n outputs=[('Z', FloatTensorType())])\nprint(onx)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## CDist and onnxruntime\n\nWe compute the output of CDist operator\nwith onnxruntime.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = InferenceSession(onx.SerializeToString(),\n providers=[\"CPUExecutionProvider\"])\nres = sess.run(None, {'X': X, 'Y': Y})\nprint(res)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Benchmark\n\nLet's compare onnxruntime and scipy.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def measure_time(name, stmt, context, repeat=100, number=20):\n tim = Timer(stmt, globals=context)\n res = np.array(\n tim.repeat(repeat=repeat, number=number))\n res /= number\n mean = np.mean(res)\n dev = np.mean(res ** 2)\n dev = (dev - mean**2) ** 0.5\n return dict(\n average=mean, deviation=dev, min_exec=np.min(res),\n max_exec=np.max(res), repeat=repeat, number=number,\n nrows=context['X'].shape[0], ncols=context['Y'].shape[1],\n name=name)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "scipy\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "time_scipy = measure_time(\n \"scipy\", \"cdist(X, Y)\",\n context={'cdist': cdist, 'X': X, 'Y': Y})\npprint(time_scipy)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "onnxruntime\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "time_ort = measure_time(\n \"ort\", \"sess.run(None, {'X': X, 'Y': Y})\",\n context={'sess': sess, 'X': X, 'Y': Y})\npprint(time_ort)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Longer benchmark\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "metrics = []\nfor dim in tqdm([10, 100, 1000, 10000]):\n # We cannot change the number of column otherwise\n # we need to create a new graph.\n X = np.random.randn(dim, 4).astype(np.float32)\n Y = np.random.randn(10, 4).astype(np.float32)\n\n time_scipy = measure_time(\n \"scipy\", \"cdist(X, Y)\",\n context={'cdist': cdist, 'X': X, 'Y': Y})\n time_ort = measure_time(\n \"ort\", \"sess.run(None, {'X': X, 'Y': Y})\",\n context={'sess': sess, 'X': X, 'Y': Y})\n metric = dict(N=dim, scipy=time_scipy['average'],\n 
ort=time_ort['average'])\n metrics.append(metric)\n\ndf = DataFrame(metrics)\ndf['scipy/ort'] = df['scipy'] / df['ort']\nprint(df)\n\ndf.plot(x='N', y=['scipy/ort'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", np.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/e8a7f0eacc9c15b5d5d27faa6629f391/auto_tutorial_jupyter.zip b/_downloads/e8a7f0eacc9c15b5d5d27faa6629f391/auto_tutorial_jupyter.zip index cca01e3d6..ef7a98d63 100644 Binary files a/_downloads/e8a7f0eacc9c15b5d5d27faa6629f391/auto_tutorial_jupyter.zip and b/_downloads/e8a7f0eacc9c15b5d5d27faa6629f391/auto_tutorial_jupyter.zip differ diff --git a/_downloads/f169dbb62321da922d71485a30985948/plot_gconverting.ipynb b/_downloads/f169dbb62321da922d71485a30985948/plot_gconverting.ipynb index 1a8eb1de7..652f8ca1a 100644 --- a/_downloads/f169dbb62321da922d71485a30985948/plot_gconverting.ipynb +++ b/_downloads/f169dbb62321da922d71485a30985948/plot_gconverting.ipynb @@ -1,108 +1,108 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Modify the ONNX graph\n\nThis example shows how to change the default ONNX graph such as\nrenaming the inputs or outputs names.\n\n## Basic example\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import numpy\nfrom onnxruntime import InferenceSession\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom skl2onnx.common.data_types import FloatTensorType, Int64TensorType\nfrom skl2onnx import to_onnx\n\niris = load_iris()\nX, y = iris.data, iris.target\nX = X.astype(numpy.float32)\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\nclr = LogisticRegression(solver=\"liblinear\")\nclr.fit(X_train, y_train)\n\n\nonx = to_onnx(clr, X, options={'zipmap': False})\n\nsess = InferenceSession(onx.SerializeToString())\ninput_names = [i.name for i in sess.get_inputs()]\noutput_names = [o.name for o in sess.get_outputs()]\nprint(\"inputs=%r, outputs=%r\" % (input_names, output_names))\nprint(sess.run(None, {input_names[0]: X_test[:2]}))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Changes the input names\n\nIt is possible to change the input name by using the\nparameter *initial_types*. 
However, the user must specify the input\ntypes as well.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx = to_onnx(clr, X, options={'zipmap': False},\n initial_types=[('X56', FloatTensorType([None, X.shape[1]]))])\n\nsess = InferenceSession(onx.SerializeToString())\ninput_names = [i.name for i in sess.get_inputs()]\noutput_names = [o.name for o in sess.get_outputs()]\nprint(\"inputs=%r, outputs=%r\" % (input_names, output_names))\nprint(sess.run(None, {input_names[0]: X_test[:2]}))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Changes the output names\n\nIt is possible to change the input name by using the\nparameter *final_types*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx = to_onnx(clr, X, options={'zipmap': False},\n final_types=[('L', Int64TensorType([None])),\n ('P', FloatTensorType([None, 3]))])\n\nsess = InferenceSession(onx.SerializeToString())\ninput_names = [i.name for i in sess.get_inputs()]\noutput_names = [o.name for o in sess.get_outputs()]\nprint(\"inputs=%r, outputs=%r\" % (input_names, output_names))\nprint(sess.run(None, {input_names[0]: X_test[:2]}))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Renaming intermediate results\n\nIt is possible to rename intermediate results by using a prefix\nor by using a function. The result will be post-processed in order\nto unique names. It does not impact the graph inputs or outputs.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def rename_results(proposed_name, existing_names):\n result = \"_\" + proposed_name.upper()\n while result in existing_names:\n result += \"A\"\n print(\"changed %r into %r.\" % (proposed_name, result))\n return result\n\n\nonx = to_onnx(clr, X, options={'zipmap': False},\n naming=rename_results)\n\nsess = InferenceSession(onx.SerializeToString())\ninput_names = [i.name for i in sess.get_inputs()]\noutput_names = [o.name for o in sess.get_outputs()]\nprint(\"inputs=%r, outputs=%r\" % (input_names, output_names))\nprint(sess.run(None, {input_names[0]: X_test[:2]}))" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Modify the ONNX graph\n\nThis example shows how to change the default ONNX graph such as\nrenaming the inputs or outputs names.\n\n## Basic example\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy\nfrom onnxruntime import InferenceSession\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom skl2onnx.common.data_types import 
FloatTensorType, Int64TensorType\nfrom skl2onnx import to_onnx\n\niris = load_iris()\nX, y = iris.data, iris.target\nX = X.astype(numpy.float32)\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\nclr = LogisticRegression(solver=\"liblinear\")\nclr.fit(X_train, y_train)\n\n\nonx = to_onnx(clr, X, options={'zipmap': False},\n target_opset=15)\n\nsess = InferenceSession(onx.SerializeToString())\ninput_names = [i.name for i in sess.get_inputs()]\noutput_names = [o.name for o in sess.get_outputs()]\nprint(\"inputs=%r, outputs=%r\" % (input_names, output_names))\nprint(sess.run(None, {input_names[0]: X_test[:2]}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Changes the input names\n\nIt is possible to change the input name by using the\nparameter *initial_types*. However, the user must specify the input\ntypes as well.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(clr, X, options={'zipmap': False},\n initial_types=[('X56', FloatTensorType([None, X.shape[1]]))],\n target_opset=15)\n\nsess = InferenceSession(onx.SerializeToString())\ninput_names = [i.name for i in sess.get_inputs()]\noutput_names = [o.name for o in sess.get_outputs()]\nprint(\"inputs=%r, outputs=%r\" % (input_names, output_names))\nprint(sess.run(None, {input_names[0]: X_test[:2]}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Changes the output names\n\nIt is possible to change the output names by using the\nparameter *final_types*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(clr, X, options={'zipmap': False},\n final_types=[('L', Int64TensorType([None])),\n ('P', FloatTensorType([None, 3]))],\n target_opset=15)\n\nsess = InferenceSession(onx.SerializeToString())\ninput_names = [i.name for i in sess.get_inputs()]\noutput_names = [o.name for o in sess.get_outputs()]\nprint(\"inputs=%r, outputs=%r\" % (input_names, output_names))\nprint(sess.run(None, {input_names[0]: X_test[:2]}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Renaming intermediate results\n\nIt is possible to rename intermediate results by using a prefix\nor by using a function. The result will be post-processed in order\nto obtain unique names. 
It does not impact the graph inputs or outputs.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def rename_results(proposed_name, existing_names):\n result = \"_\" + proposed_name.upper()\n while result in existing_names:\n result += \"A\"\n print(\"changed %r into %r.\" % (proposed_name, result))\n return result\n\n\nonx = to_onnx(clr, X, options={'zipmap': False},\n naming=rename_results, target_opset=15)\n\nsess = InferenceSession(onx.SerializeToString())\ninput_names = [i.name for i in sess.get_inputs()]\noutput_names = [o.name for o in sess.get_outputs()]\nprint(\"inputs=%r, outputs=%r\" % (input_names, output_names))\nprint(sess.run(None, {input_names[0]: X_test[:2]}))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/f2ce31e1a8504eec1c1999f19d4592e2/plot_gbegin_transfer_learning.py b/_downloads/f2ce31e1a8504eec1c1999f19d4592e2/plot_gbegin_transfer_learning.py index 7b739db8b..c2f0c799e 100644 --- a/_downloads/f2ce31e1a8504eec1c1999f19d4592e2/plot_gbegin_transfer_learning.py +++ b/_downloads/f2ce31e1a8504eec1c1999f19d4592e2/plot_gbegin_transfer_learning.py @@ -19,9 +19,6 @@ support. Once the model is converted into ONNX, it can be inserted in any :epkg:`scikit-learn` pipeline. -.. contents:: - :local: - Retrieve and load a model +++++++++++++++++++++++++ diff --git a/_downloads/f59c6dff612756e00698c4b71664ce10/plot_investigate_pipeline.py b/_downloads/f59c6dff612756e00698c4b71664ce10/plot_investigate_pipeline.py index 6c84a8b87..51fc56afc 100644 --- a/_downloads/f59c6dff612756e00698c4b71664ce10/plot_investigate_pipeline.py +++ b/_downloads/f59c6dff612756e00698c4b71664ce10/plot_investigate_pipeline.py @@ -8,9 +8,6 @@ The following example shows how to look into a converted models and easily find errors at every step of the pipeline. -.. contents:: - :local: - Create a pipeline +++++++++++++++++ diff --git a/_downloads/f7f7bad474c7a651c2d0c2997fc42343/plot_wext_pyod_forest.py b/_downloads/f7f7bad474c7a651c2d0c2997fc42343/plot_wext_pyod_forest.py index ffa06bc9e..23c40d713 100644 --- a/_downloads/f7f7bad474c7a651c2d0c2997fc42343/plot_wext_pyod_forest.py +++ b/_downloads/f7f7bad474c7a651c2d0c2997fc42343/plot_wext_pyod_forest.py @@ -15,9 +15,6 @@ pyod.models.html#module-pyod.models.iforest>`_. This example uses :ref:`l-plot-custom-converter` as a start. -.. 
contents:: - :local: - Trains a model ++++++++++++++ @@ -43,7 +40,7 @@ IForest = None if IForest is not None: - data1 = {'First': [500, 500, 400, 100, 200, 300, 100], + data1 = {'First': [500, 500, 400, 100, 200, 300, 100], 'Second': ['a', 'b', 'a', 'b', 'a', 'b', 'c']} df1 = pd.DataFrame(data1, columns=['First', 'Second']) diff --git a/_downloads/f8e5d8e309ca291f68bd029c26838ccc/plot_backend.ipynb b/_downloads/f8e5d8e309ca291f68bd029c26838ccc/plot_backend.ipynb index 435fb5da0..d53387145 100644 --- a/_downloads/f8e5d8e309ca291f68bd029c26838ccc/plot_backend.ipynb +++ b/_downloads/f8e5d8e309ca291f68bd029c26838ccc/plot_backend.ipynb @@ -1,151 +1,151 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n\n# ONNX Runtime Backend for ONNX\n\n.. index:: backend\n\n*ONNX Runtime* extends the\n`onnx backend API `_\nto run predictions using this runtime.\nLet's use the API to compute the prediction\nof a simple logistic regression model.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import skl2onnx\nimport onnxruntime\nimport onnx\nimport sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nimport numpy\nfrom onnxruntime import get_device\nimport numpy as np\nimport onnxruntime.backend as backend" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's create an ONNX graph first.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "data = load_iris()\nX, Y = data.data, data.target\nlogreg = LogisticRegression(C=1e5).fit(X, Y)\nmodel = skl2onnx.to_onnx(logreg, X.astype(np.float32))\nname = \"logreg_iris.onnx\"\nwith open(name, \"wb\") as f:\n f.write(model.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's use ONNX backend API to test it.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model = onnx.load(name)\nrep = backend.prepare(model, 'CPU')\nx = np.array([[-1.0, -2.0, 5.0, 6.0],\n [-1.0, -2.0, -3.0, -4.0],\n [-1.0, -2.0, 7.0, 8.0]],\n dtype=np.float32)\nlabel, proba = rep.run(x)\nprint(\"label={}\".format(label))\nprint(\"probabilities={}\".format(proba))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The device depends on how the package was compiled,\nGPU or CPU.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(get_device())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The backend can also directly load the model\nwithout using *onnx*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "rep = backend.prepare(name, 'CPU')\nx = np.array([[-1.0, -2.0, -3.0, -4.0],\n [-1.0, -2.0, -3.0, -4.0],\n [-1.0, -2.0, -3.0, -4.0]],\n dtype=np.float32)\nlabel, proba = rep.run(x)\nprint(\"label={}\".format(label))\nprint(\"probabilities={}\".format(proba))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The backend API is implemented by other 
frameworks\nand makes it easier to switch between multiple runtimes\nwith the same API.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n\n# ONNX Runtime Backend for ONNX\n\n.. index:: backend\n\n*ONNX Runtime* extends the\n[onnx backend API](https://github.com/onnx/onnx/blob/master/docs/\nImplementingAnOnnxBackend.md)\nto run predictions using this runtime.\nLet's use the API to compute the prediction\nof a simple logistic regression model.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import skl2onnx\nimport onnxruntime\nimport onnx\nimport sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nimport numpy\nfrom onnxruntime import get_device\nimport numpy as np\nimport onnxruntime.backend as backend" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create an ONNX graph first.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "data = load_iris()\nX, Y = data.data, data.target\nlogreg = LogisticRegression(C=1e5).fit(X, Y)\nmodel = skl2onnx.to_onnx(logreg, X.astype(np.float32))\nname = \"logreg_iris.onnx\"\nwith open(name, \"wb\") as f:\n f.write(model.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's use ONNX backend API to test it.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model = onnx.load(name)\nrep = backend.prepare(model, 'CPU')\nx = np.array([[-1.0, -2.0, 5.0, 6.0],\n [-1.0, -2.0, -3.0, -4.0],\n [-1.0, -2.0, 7.0, 8.0]],\n dtype=np.float32)\nlabel, proba = rep.run(x)\nprint(\"label={}\".format(label))\nprint(\"probabilities={}\".format(proba))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The device depends on how the package was compiled,\nGPU or CPU.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(get_device())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The backend can also directly load the model\nwithout using *onnx*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + 
"outputs": [], + "source": [ + "rep = backend.prepare(name, 'CPU')\nx = np.array([[-1.0, -2.0, -3.0, -4.0],\n [-1.0, -2.0, -3.0, -4.0],\n [-1.0, -2.0, -3.0, -4.0]],\n dtype=np.float32)\nlabel, proba = rep.run(x)\nprint(\"label={}\".format(label))\nprint(\"probabilities={}\".format(proba))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The backend API is implemented by other frameworks\nand makes it easier to switch between multiple runtimes\nwith the same API.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", onnxruntime.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/f965c1616d7ada159d0d0611b0d9e425/plot_pipeline_lightgbm.py b/_downloads/f965c1616d7ada159d0d0611b0d9e425/plot_pipeline_lightgbm.py index 2a3523607..a57bca298 100644 --- a/_downloads/f965c1616d7ada159d0d0611b0d9e425/plot_pipeline_lightgbm.py +++ b/_downloads/f965c1616d7ada159d0d0611b0d9e425/plot_pipeline_lightgbm.py @@ -16,9 +16,6 @@ the whole pipeline as long as it knows the converter associated to a *LGBMClassifier*. Let's see how to do it. -.. contents:: - :local: - Train a LightGBM classifier +++++++++++++++++++++++++++ """ diff --git a/_downloads/fc12d1d5527085d200c7e164401cd670/plot_intermediate_outputs.ipynb b/_downloads/fc12d1d5527085d200c7e164401cd670/plot_intermediate_outputs.ipynb index 660b64d30..22e35bafb 100644 --- a/_downloads/fc12d1d5527085d200c7e164401cd670/plot_intermediate_outputs.ipynb +++ b/_downloads/fc12d1d5527085d200c7e164401cd670/plot_intermediate_outputs.ipynb @@ -1,313 +1,313 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Walk through intermediate outputs\n\nWe reuse the example `example-complex-pipeline` and\nwalk through intermediates outputs. It is very likely a converted\nmodel gives different outputs or fails due to a custom\nconverter which is not correctly implemented.\nOne option is to look into the output of every node of the\nONNX graph.\n\n## Create and train a complex pipeline\n\nWe reuse the pipeline implemented in example\n`Column Transformer with Mixed Types\n`_.\nThere is one change because\n`ONNX-ML Imputer\n`_\ndoes not handle string type. This cannot be part of the final ONNX pipeline\nand must be removed. 
Look for comment starting with ``---`` below.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import skl2onnx\nimport onnx\nimport sklearn\nimport matplotlib.pyplot as plt\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nfrom skl2onnx.helpers.onnx_helper import select_model_inputs_outputs\nfrom skl2onnx.helpers.onnx_helper import save_onnx_model\nfrom skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs\nfrom skl2onnx.helpers.onnx_helper import load_onnx_model\nimport numpy\nimport onnxruntime as rt\nfrom skl2onnx import convert_sklearn\nimport pprint\nfrom skl2onnx.common.data_types import (\n FloatTensorType, StringTensorType, Int64TensorType)\nimport numpy as np\nimport pandas as pd\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\n\ntitanic_url = ('https://raw.githubusercontent.com/amueller/'\n 'scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv')\ndata = pd.read_csv(titanic_url)\nX = data.drop('survived', axis=1)\ny = data['survived']\n\n# SimpleImputer on string is not available\n# for string in ONNX-ML specifications.\n# So we do it beforehand.\nfor cat in ['embarked', 'sex', 'pclass']:\n X[cat].fillna('missing', inplace=True)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nnumeric_features = ['age', 'fare']\nnumeric_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler())])\n\ncategorical_features = ['embarked', 'sex', 'pclass']\ncategorical_transformer = Pipeline(steps=[\n # --- SimpleImputer is not available for strings in ONNX-ML specifications.\n # ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\n ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n\npreprocessor = ColumnTransformer(\n transformers=[\n ('num', numeric_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features),\n ])\n\nclf = Pipeline(steps=[('preprocessor', preprocessor),\n ('classifier', LogisticRegression(solver='lbfgs'))])\n\nclf.fit(X_train, y_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Define the inputs of the ONNX graph\n\n*sklearn-onnx* does not know the features used to train the model\nbut it needs to know which feature has which name.\nWe simply reuse the dataframe column definition.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(X_train.dtypes)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "After conversion.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "def convert_dataframe_schema(df, drop=None):\n inputs = []\n for k, v in zip(df.columns, df.dtypes):\n if drop is not None and k in drop:\n continue\n if v == 'int64':\n t = Int64TensorType([None, 1])\n elif v == 'float64':\n t = FloatTensorType([None, 1])\n else:\n t = StringTensorType([None, 1])\n inputs.append((k, t))\n return inputs\n\n\ninputs = convert_dataframe_schema(X_train)\n\npprint.pprint(inputs)" - ] - }, - { - "cell_type": "markdown", - 
"metadata": {}, - "source": [ - "Merging single column into vectors is not\nthe most efficient way to compute the prediction.\nIt could be done before converting the pipeline into a graph.\n\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Convert the pipeline into ONNX\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "try:\n model_onnx = convert_sklearn(clf, 'pipeline_titanic', inputs,\n target_opset=12)\nexcept Exception as e:\n print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "*scikit-learn* does implicit conversions when it can.\n*sklearn-onnx* does not. The ONNX version of *OneHotEncoder*\nmust be applied on columns of the same type.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X_train['pclass'] = X_train['pclass'].astype(str)\nX_test['pclass'] = X_test['pclass'].astype(str)\nwhite_list = numeric_features + categorical_features\nto_drop = [c for c in X_train.columns if c not in white_list]\ninputs = convert_dataframe_schema(X_train, to_drop)\n\nmodel_onnx = convert_sklearn(clf, 'pipeline_titanic', inputs,\n target_opset=12)\n\n\n# And save.\nwith open(\"pipeline_titanic.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Compare the predictions\n\nFinal step, we need to ensure the converted model\nproduces the same predictions, labels and probabilities.\nLet's start with *scikit-learn*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"predict\", clf.predict(X_test[:5]))\nprint(\"predict_proba\", clf.predict_proba(X_test[:1]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Predictions with onnxruntime.\nWe need to remove the dropped columns and to change\nthe double vectors into float vectors as *onnxruntime*\ndoes not support double floats.\n*onnxruntime* does not accept *dataframe*.\ninputs must be given as a list of dictionary.\nLast detail, every column was described not really as a vector\nbut as a matrix of one column which explains the last line\nwith the *reshape*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "X_test2 = X_test.drop(to_drop, axis=1)\ninputs = {c: X_test2[c].values for c in X_test2.columns}\nfor c in numeric_features:\n inputs[c] = inputs[c].astype(np.float32)\nfor k in inputs:\n inputs[k] = inputs[k].reshape((inputs[k].shape[0], 1))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We are ready to run *onnxruntime*.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = rt.InferenceSession(\"pipeline_titanic.onnx\")\npred_onx = sess.run(None, inputs)\nprint(\"predict\", pred_onx[0][:5])\nprint(\"predict_proba\", pred_onx[1][:1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Compute intermediate outputs\n\nUnfortunately, there is actually no way to ask\n*onnxruntime* to retrieve the output of intermediate nodes.\nWe need to modifies the *ONNX* before it is given to *onnxruntime*.\nLet's see first the list of intermediate output.\n\n" - ] - 
}, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "model_onnx = load_onnx_model(\"pipeline_titanic.onnx\")\nfor out in enumerate_model_node_outputs(model_onnx):\n print(out)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Not that easy to tell which one is what as the *ONNX*\nhas more operators than the original *scikit-learn* pipelines.\nThe graph at `l-plot-complex-pipeline-graph`\nhelps up to find the outputs of both numerical\nand textual pipeline: *variable1*, *variable2*.\nLet's look into the numerical pipeline first.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "num_onnx = select_model_inputs_outputs(model_onnx, 'variable1')\nsave_onnx_model(num_onnx, \"pipeline_titanic_numerical.onnx\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's compute the numerical features.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sess = rt.InferenceSession(\"pipeline_titanic_numerical.onnx\")\nnumX = sess.run(None, inputs)\nprint(\"numerical features\", numX[0][:1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We do the same for the textual features.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(model_onnx)\ntext_onnx = select_model_inputs_outputs(model_onnx, 'variable2')\nsave_onnx_model(text_onnx, \"pipeline_titanic_textual.onnx\")\nsess = rt.InferenceSession(\"pipeline_titanic_textual.onnx\")\nnumT = sess.run(None, inputs)\nprint(\"textual features\", numT[0][:1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Display the sub-ONNX graph\n\nFinally, let's see both subgraphs. 
First, numerical pipeline.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n num_onnx.graph, name=num_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline_titanic_num.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline_titanic_num.dot')\n\nimage = plt.imread(\"pipeline_titanic_num.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then textual pipeline.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pydot_graph = GetPydotGraph(\n text_onnx.graph, name=text_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline_titanic_text.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline_titanic_text.dot')\n\nimage = plt.imread(\"pipeline_titanic_text.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Walk through intermediate outputs\n\nWe reuse the example `example-complex-pipeline` and\nwalk through intermediate outputs. It is very likely that a converted\nmodel gives different outputs or fails due to a custom\nconverter which is not correctly implemented.\nOne option is to look into the output of every node of the\nONNX graph.\n\n## Create and train a complex pipeline\n\nWe reuse the pipeline implemented in example\n[Column Transformer with Mixed Types](https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer_mixed_types.html#sphx-glr-auto-examples-compose-plot-column-transformer-mixed-types-py).\nThere is one change because\n[ONNX-ML Imputer](https://github.com/onnx/onnx/blob/master/docs/\nOperators-ml.md#ai.onnx.ml.Imputer)\ndoes not handle string type. This cannot be part of the final ONNX pipeline\nand must be removed. 
Look for comment starting with ``---`` below.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import skl2onnx\nimport onnx\nimport sklearn\nimport matplotlib.pyplot as plt\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nfrom skl2onnx.helpers.onnx_helper import select_model_inputs_outputs\nfrom skl2onnx.helpers.onnx_helper import save_onnx_model\nfrom skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs\nfrom skl2onnx.helpers.onnx_helper import load_onnx_model\nimport numpy\nimport onnxruntime as rt\nfrom skl2onnx import convert_sklearn\nimport pprint\nfrom skl2onnx.common.data_types import (\n FloatTensorType, StringTensorType, Int64TensorType)\nimport numpy as np\nimport pandas as pd\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\n\ntitanic_url = ('https://raw.githubusercontent.com/amueller/'\n 'scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv')\ndata = pd.read_csv(titanic_url)\nX = data.drop('survived', axis=1)\ny = data['survived']\n\n# SimpleImputer on string is not available\n# for string in ONNX-ML specifications.\n# So we do it beforehand.\nfor cat in ['embarked', 'sex', 'pclass']:\n X[cat].fillna('missing', inplace=True)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nnumeric_features = ['age', 'fare']\nnumeric_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler())])\n\ncategorical_features = ['embarked', 'sex', 'pclass']\ncategorical_transformer = Pipeline(steps=[\n # --- SimpleImputer is not available for strings in ONNX-ML specifications.\n # ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\n ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n\npreprocessor = ColumnTransformer(\n transformers=[\n ('num', numeric_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features),\n ])\n\nclf = Pipeline(steps=[('preprocessor', preprocessor),\n ('classifier', LogisticRegression(solver='lbfgs'))])\n\nclf.fit(X_train, y_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define the inputs of the ONNX graph\n\n*sklearn-onnx* does not know the features used to train the model\nbut it needs to know which feature has which name.\nWe simply reuse the dataframe column definition.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(X_train.dtypes)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After conversion.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def convert_dataframe_schema(df, drop=None):\n inputs = []\n for k, v in zip(df.columns, df.dtypes):\n if drop is not None and k in drop:\n continue\n if v == 'int64':\n t = Int64TensorType([None, 1])\n elif v == 'float64':\n t = FloatTensorType([None, 1])\n else:\n t = StringTensorType([None, 1])\n inputs.append((k, t))\n return inputs\n\n\ninputs = convert_dataframe_schema(X_train)\n\npprint.pprint(inputs)" + ] + }, + { + "cell_type": "markdown", + 
"metadata": {}, + "source": [ + "Merging single column into vectors is not\nthe most efficient way to compute the prediction.\nIt could be done before converting the pipeline into a graph.\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert the pipeline into ONNX\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "try:\n model_onnx = convert_sklearn(clf, 'pipeline_titanic', inputs,\n target_opset=12)\nexcept Exception as e:\n print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*scikit-learn* does implicit conversions when it can.\n*sklearn-onnx* does not. The ONNX version of *OneHotEncoder*\nmust be applied on columns of the same type.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X_train['pclass'] = X_train['pclass'].astype(str)\nX_test['pclass'] = X_test['pclass'].astype(str)\nwhite_list = numeric_features + categorical_features\nto_drop = [c for c in X_train.columns if c not in white_list]\ninputs = convert_dataframe_schema(X_train, to_drop)\n\nmodel_onnx = convert_sklearn(clf, 'pipeline_titanic', inputs,\n target_opset=12)\n\n\n# And save.\nwith open(\"pipeline_titanic.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Compare the predictions\n\nFinal step, we need to ensure the converted model\nproduces the same predictions, labels and probabilities.\nLet's start with *scikit-learn*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"predict\", clf.predict(X_test[:5]))\nprint(\"predict_proba\", clf.predict_proba(X_test[:1]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Predictions with onnxruntime.\nWe need to remove the dropped columns and to change\nthe double vectors into float vectors as *onnxruntime*\ndoes not support double floats.\n*onnxruntime* does not accept *dataframe*.\ninputs must be given as a list of dictionary.\nLast detail, every column was described not really as a vector\nbut as a matrix of one column which explains the last line\nwith the *reshape*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X_test2 = X_test.drop(to_drop, axis=1)\ninputs = {c: X_test2[c].values for c in X_test2.columns}\nfor c in numeric_features:\n inputs[c] = inputs[c].astype(np.float32)\nfor k in inputs:\n inputs[k] = inputs[k].reshape((inputs[k].shape[0], 1))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We are ready to run *onnxruntime*.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = rt.InferenceSession(\"pipeline_titanic.onnx\")\npred_onx = sess.run(None, inputs)\nprint(\"predict\", pred_onx[0][:5])\nprint(\"predict_proba\", pred_onx[1][:1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Compute intermediate outputs\n\nUnfortunately, there is actually no way to ask\n*onnxruntime* to retrieve the output of intermediate nodes.\nWe need to modifies the *ONNX* before it is given to *onnxruntime*.\nLet's see first the list of intermediate output.\n\n" + ] + 
}, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "model_onnx = load_onnx_model(\"pipeline_titanic.onnx\")\nfor out in enumerate_model_node_outputs(model_onnx):\n print(out)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Not that easy to tell which one is what as the *ONNX*\nhas more operators than the original *scikit-learn* pipelines.\nThe graph at `l-plot-complex-pipeline-graph`\nhelps up to find the outputs of both numerical\nand textual pipeline: *variable1*, *variable2*.\nLet's look into the numerical pipeline first.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "num_onnx = select_model_inputs_outputs(model_onnx, 'variable1')\nsave_onnx_model(num_onnx, \"pipeline_titanic_numerical.onnx\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's compute the numerical features.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sess = rt.InferenceSession(\"pipeline_titanic_numerical.onnx\")\nnumX = sess.run(None, inputs)\nprint(\"numerical features\", numX[0][:1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We do the same for the textual features.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(model_onnx)\ntext_onnx = select_model_inputs_outputs(model_onnx, 'variable2')\nsave_onnx_model(text_onnx, \"pipeline_titanic_textual.onnx\")\nsess = rt.InferenceSession(\"pipeline_titanic_textual.onnx\")\nnumT = sess.run(None, inputs)\nprint(\"textual features\", numT[0][:1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Display the sub-ONNX graph\n\nFinally, let's see both subgraphs. 
First, numerical pipeline.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n num_onnx.graph, name=num_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline_titanic_num.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline_titanic_num.dot')\n\nimage = plt.imread(\"pipeline_titanic_num.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then textual pipeline.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "pydot_graph = GetPydotGraph(\n text_onnx.graph, name=text_onnx.graph.name, rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"))\npydot_graph.write_dot(\"pipeline_titanic_text.dot\")\n\nos.system('dot -O -Gdpi=300 -Tpng pipeline_titanic_text.dot')\n\nimage = plt.imread(\"pipeline_titanic_text.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/fd8a84a91157151cef8ca1baa203ebf3/plot_gexternal_lightgbm_reg.py b/_downloads/fd8a84a91157151cef8ca1baa203ebf3/plot_gexternal_lightgbm_reg.py index f4d2c4c82..9ccbfa594 100644 --- a/_downloads/fd8a84a91157151cef8ca1baa203ebf3/plot_gexternal_lightgbm_reg.py +++ b/_downloads/fd8a84a91157151cef8ca1baa203ebf3/plot_gexternal_lightgbm_reg.py @@ -32,9 +32,6 @@ :math:`D'(x) = |\\sum_{k=1}^a \\left[\\sum\\right]_{i=1}^{F/a} float(T_{ak + i}(x)) - \\sum_{i=1}^F T_i(x)|`. -.. contents:: - :local: - Train a LGBMRegressor +++++++++++++++++++++ """ @@ -145,10 +142,10 @@ def skl2onnx_convert_lightgbm(scope, operator, container): print("processing time no split", timeit.timeit( - lambda: sess.run(None, {'X': X32})[0], number=150)) + lambda: sess.run(None, {'X': X32})[0], number=150)) print("processing time split", timeit.timeit( - lambda: sess_split.run(None, {'X': X32})[0], number=150)) + lambda: sess_split.run(None, {'X': X32})[0], number=150)) ############################################# # Split influence @@ -173,8 +170,9 @@ def skl2onnx_convert_lightgbm(scope, operator, container): ########################################## # Graph. 
- -ax = df.plot(title="Sum of discrepancies against split\n" - "split = number of tree per node") +_, ax = plt.subplots(1, 1) +df.plot(title="Sum of discrepancies against split\n" + "split = number of tree per node", + ax=ax) # plt.show() diff --git a/_downloads/fddc68f5cc0000f858ca7c34e29dbd7e/plot_bbegin_measure_time.ipynb b/_downloads/fddc68f5cc0000f858ca7c34e29dbd7e/plot_bbegin_measure_time.ipynb index 02cfdb3cf..10d42b2e0 100644 --- a/_downloads/fddc68f5cc0000f858ca7c34e29dbd7e/plot_bbegin_measure_time.ipynb +++ b/_downloads/fddc68f5cc0000f858ca7c34e29dbd7e/plot_bbegin_measure_time.ipynb @@ -1,133 +1,133 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Benchmark ONNX conversion\n\n.. index:: benchmark\n\nExample `l-simple-deploy-1` converts a simple model.\nThis example takes a similar example but on random data\nand compares the processing time required by each option\nto compute predictions.\n\n\n## Training a pipeline\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import numpy\nfrom pandas import DataFrame\nfrom tqdm import tqdm\nfrom sklearn import config_context\nfrom sklearn.datasets import make_regression\nfrom sklearn.ensemble import (\n GradientBoostingRegressor, RandomForestRegressor,\n VotingRegressor)\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom mlprodict.onnxrt import OnnxInference\nfrom onnxruntime import InferenceSession\nfrom skl2onnx import to_onnx\nfrom skl2onnx.tutorial import measure_time\n\n\nN = 11000\nX, y = make_regression(N, n_features=10)\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, train_size=0.01)\nprint(\"Train shape\", X_train.shape)\nprint(\"Test shape\", X_test.shape)\n\nreg1 = GradientBoostingRegressor(random_state=1)\nreg2 = RandomForestRegressor(random_state=1)\nreg3 = LinearRegression()\nereg = VotingRegressor([('gb', reg1), ('rf', reg2), ('lr', reg3)])\nereg.fit(X_train, y_train)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Measure the processing time\n\nWe use function :func:`skl2onnx.tutorial.measure_time`.\nThe page about `assume_finite `_\nmay be useful if you need to optimize the prediction.\nWe measure the processing time per observation whether\nor not an observation belongs to a batch or is a single one.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sizes = [(1, 50), (10, 50), (1000, 10), (10000, 5)]\n\nwith config_context(assume_finite=True):\n obs = []\n for batch_size, repeat in tqdm(sizes):\n context = {\"ereg\": ereg, 'X': X_test[:batch_size]}\n mt = measure_time(\n \"ereg.predict(X)\", context, div_by_number=True,\n number=10, repeat=repeat)\n mt['size'] = context['X'].shape[0]\n mt['mean_obs'] = mt['average'] / mt['size']\n obs.append(mt)\n\ndf_skl = DataFrame(obs)\ndf_skl" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Graphe.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "df_skl.set_index('size')[['mean_obs']].plot(\n title=\"scikit-learn\", logx=True, logy=True)" - ] - }, - { - "cell_type": "markdown", - 
"metadata": {}, - "source": [ - "## ONNX runtime\n\nThe same is done with the two ONNX runtime\navailable.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "onx = to_onnx(ereg, X_train[:1].astype(numpy.float32),\n target_opset=14)\nsess = InferenceSession(onx.SerializeToString())\noinf = OnnxInference(onx, runtime=\"python_compiled\")\n\nobs = []\nfor batch_size, repeat in tqdm(sizes):\n\n # scikit-learn\n context = {\"ereg\": ereg, 'X': X_test[:batch_size].astype(numpy.float32)}\n mt = measure_time(\n \"ereg.predict(X)\", context, div_by_number=True,\n number=10, repeat=repeat)\n mt['size'] = context['X'].shape[0]\n mt['skl'] = mt['average'] / mt['size']\n\n # onnxruntime\n context = {\"sess\": sess, 'X': X_test[:batch_size].astype(numpy.float32)}\n mt2 = measure_time(\n \"sess.run(None, {'X': X})[0]\", context, div_by_number=True,\n number=10, repeat=repeat)\n mt['ort'] = mt2['average'] / mt['size']\n\n # mlprodict\n context = {\"oinf\": oinf, 'X': X_test[:batch_size].astype(numpy.float32)}\n mt2 = measure_time(\n \"oinf.run({'X': X})['variable']\", context, div_by_number=True,\n number=10, repeat=repeat)\n mt['pyrt'] = mt2['average'] / mt['size']\n\n # end\n obs.append(mt)\n\n\ndf = DataFrame(obs)\ndf" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Graph.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "df.set_index('size')[['skl', 'ort', 'pyrt']].plot(\n title=\"Average prediction time per runtime\",\n logx=True, logy=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":epkg:`ONNX` runtimes are much faster than :epkg:`scikit-learn`\nto predict one observation. :epkg:`scikit-learn` is optimized\nfor training, for batch prediction. That explains why\n:epkg:`scikit-learn` and ONNX runtimes seem to converge\nfor big batches. They use similar implementation,\nparallelization and languages (:epkg:`C++`, :epkg:`openmp`).\n\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Benchmark ONNX conversion\n\n.. 
index:: benchmark\n\nExample `l-simple-deploy-1` converts a simple model.\nThis example takes a similar example but on random data\nand compares the processing time required by each option\nto compute predictions.\n\n## Training a pipeline\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import numpy\nfrom pandas import DataFrame\nfrom tqdm import tqdm\nfrom sklearn import config_context\nfrom sklearn.datasets import make_regression\nfrom sklearn.ensemble import (\n GradientBoostingRegressor, RandomForestRegressor,\n VotingRegressor)\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom mlprodict.onnxrt import OnnxInference\nfrom onnxruntime import InferenceSession\nfrom skl2onnx import to_onnx\nfrom skl2onnx.tutorial import measure_time\n\n\nN = 11000\nX, y = make_regression(N, n_features=10)\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, train_size=0.01)\nprint(\"Train shape\", X_train.shape)\nprint(\"Test shape\", X_test.shape)\n\nreg1 = GradientBoostingRegressor(random_state=1)\nreg2 = RandomForestRegressor(random_state=1)\nreg3 = LinearRegression()\nereg = VotingRegressor([('gb', reg1), ('rf', reg2), ('lr', reg3)])\nereg.fit(X_train, y_train)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Measure the processing time\n\nWe use function :func:`skl2onnx.tutorial.measure_time`.\nThe page about [assume_finite](https://scikit-learn.org/\nstable/modules/generated/sklearn.config_context.html)\nmay be useful if you need to optimize the prediction.\nWe measure the processing time per observation, whether\nthe observation belongs to a batch or is a single one.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "sizes = [(1, 50), (10, 50), (1000, 10), (10000, 5)]\n\nwith config_context(assume_finite=True):\n obs = []\n for batch_size, repeat in tqdm(sizes):\n context = {\"ereg\": ereg, 'X': X_test[:batch_size]}\n mt = measure_time(\n \"ereg.predict(X)\", context, div_by_number=True,\n number=10, repeat=repeat)\n mt['size'] = context['X'].shape[0]\n mt['mean_obs'] = mt['average'] / mt['size']\n obs.append(mt)\n\ndf_skl = DataFrame(obs)\ndf_skl" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "df_skl.set_index('size')[['mean_obs']].plot(\n title=\"scikit-learn\", logx=True, logy=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ONNX runtime\n\nThe same is done with the two ONNX runtimes\navailable.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "onx = to_onnx(ereg, X_train[:1].astype(numpy.float32),\n target_opset=14)\nsess = InferenceSession(onx.SerializeToString())\noinf = OnnxInference(onx, runtime=\"python_compiled\")\n\nobs = []\nfor batch_size, repeat in tqdm(sizes):\n\n # scikit-learn\n context = {\"ereg\": ereg, 'X': X_test[:batch_size].astype(numpy.float32)}\n mt = measure_time(\n \"ereg.predict(X)\", context, div_by_number=True,\n number=10, repeat=repeat)\n mt['size'] = context['X'].shape[0]\n mt['skl'] = mt['average'] / mt['size']\n\n # onnxruntime\n context = {\"sess\": sess, 'X': 
X_test[:batch_size].astype(numpy.float32)}\n mt2 = measure_time(\n \"sess.run(None, {'X': X})[0]\", context, div_by_number=True,\n number=10, repeat=repeat)\n mt['ort'] = mt2['average'] / mt['size']\n\n # mlprodict\n context = {\"oinf\": oinf, 'X': X_test[:batch_size].astype(numpy.float32)}\n mt2 = measure_time(\n \"oinf.run({'X': X})['variable']\", context, div_by_number=True,\n number=10, repeat=repeat)\n mt['pyrt'] = mt2['average'] / mt['size']\n\n # end\n obs.append(mt)\n\n\ndf = DataFrame(obs)\ndf" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Graph.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "df.set_index('size')[['skl', 'ort', 'pyrt']].plot(\n title=\"Average prediction time per runtime\",\n logx=True, logy=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":epkg:`ONNX` runtimes are much faster than :epkg:`scikit-learn`\nto predict one observation. :epkg:`scikit-learn` is optimized\nfor training, for batch prediction. That explains why\n:epkg:`scikit-learn` and ONNX runtimes seem to converge\nfor big batches. They use similar implementation,\nparallelization and languages (:epkg:`C++`, :epkg:`openmp`).\n\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_downloads/fed6d488337c64d82b0e681711163ea1/plot_benchmark_pipeline.ipynb b/_downloads/fed6d488337c64d82b0e681711163ea1/plot_benchmark_pipeline.ipynb index 5121166cc..d0c582d48 100644 --- a/_downloads/fed6d488337c64d82b0e681711163ea1/plot_benchmark_pipeline.ipynb +++ b/_downloads/fed6d488337c64d82b0e681711163ea1/plot_benchmark_pipeline.ipynb @@ -1,144 +1,144 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Benchmark a pipeline\n\nThe following example checks up on every step in a pipeline,\ncompares and benchmarks the predictions.\n\n## Create a pipeline\n\nWe reuse the pipeline implemented in example\n`Pipelining: chaining a PCA and a logistic regression\n`_.\nThere is one change because\n`ONNX-ML Imputer `_\ndoes not handle string type. This cannot be part of the final ONNX pipeline\nand must be removed. 
Look for comment starting with ``---`` below.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import skl2onnx\nimport onnx\nimport sklearn\nimport numpy\nfrom skl2onnx.helpers import collect_intermediate_steps\nfrom timeit import timeit\nfrom skl2onnx.helpers import compare_objects\nimport onnxruntime as rt\nfrom onnxconverter_common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\n\nlogistic = LogisticRegression()\npca = PCA()\npipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])\n\ndigits = datasets.load_digits()\nX_digits = digits.data[:1000]\ny_digits = digits.target[:1000]\n\npipe.fit(X_digits, y_digits)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversion to ONNX\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "initial_types = [('input', FloatTensorType((None, X_digits.shape[1])))]\nmodel_onnx = convert_sklearn(pipe, initial_types=initial_types,\n target_opset=12)\n\nsess = rt.InferenceSession(model_onnx.SerializeToString())\nprint(\"skl predict_proba\")\nprint(pipe.predict_proba(X_digits[:2]))\nonx_pred = sess.run(None, {'input': X_digits[:2].astype(np.float32)})[1]\ndf = pd.DataFrame(onx_pred)\nprint(\"onnx predict_proba\")\nprint(df.values)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Comparing outputs\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "compare_objects(pipe.predict_proba(X_digits[:2]), onx_pred)\n# No exception so they are the same." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Benchmarks\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"scikit-learn\")\nprint(timeit(\"pipe.predict_proba(X_digits[:1])\",\n number=10000, globals=globals()))\nprint(\"onnxruntime\")\nprint(timeit(\"sess.run(None, {'input': X_digits[:1].astype(np.float32)})[1]\",\n number=10000, globals=globals()))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Intermediate steps\n\nLet's imagine the final output is wrong and we need\nto look into each component of the pipeline which one\nis failing. 
The following method modifies the scikit-learn\npipeline to steal the intermediate outputs and produces\nan smaller ONNX graph for every operator.\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "steps = collect_intermediate_steps(\n pipe, \"pipeline\", initial_types)\n\nassert len(steps) == 2\n\npipe.predict_proba(X_digits[:2])\n\nfor i, step in enumerate(steps):\n onnx_step = step['onnx_step']\n sess = rt.InferenceSession(onnx_step.SerializeToString())\n onnx_outputs = sess.run(None, {'input': X_digits[:2].astype(np.float32)})\n skl_outputs = step['model']._debug.outputs\n if 'transform' in skl_outputs:\n compare_objects(skl_outputs['transform'], onnx_outputs[0])\n print(\"benchmark\", step['model'].__class__)\n print(\"scikit-learn\")\n print(timeit(\"step['model'].transform(X_digits[:1])\",\n number=10000, globals=globals()))\n else:\n compare_objects(skl_outputs['predict_proba'], onnx_outputs[1])\n print(\"benchmark\", step['model'].__class__)\n print(\"scikit-learn\")\n print(timeit(\"step['model'].predict_proba(X_digits[:1])\",\n number=10000, globals=globals()))\n print(\"onnxruntime\")\n print(timeit(\"sess.run(None, {'input': X_digits[:1].astype(np.float32)})\",\n number=10000, globals=globals()))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Versions used for this example**\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 0 +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Benchmark a pipeline\n\nThe following example checks up on every step in a pipeline,\ncompares and benchmarks the predictions.\n\n## Create a pipeline\n\nWe reuse the pipeline implemented in example\n[Pipelining: chaining a PCA and a logistic regression](https://scikit-learn.org/stable/auto_examples/compose/plot_digits_pipe.html).\nThere is one change because\n[ONNX-ML Imputer](https://github.com/onnx/onnx/blob/master/\ndocs/Operators-ml.md#ai.onnx.ml.Imputer)\ndoes not handle string type. This cannot be part of the final ONNX pipeline\nand must be removed. 
Look for comment starting with ``---`` below.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import skl2onnx\nimport onnx\nimport sklearn\nimport numpy\nfrom skl2onnx.helpers import collect_intermediate_steps\nfrom timeit import timeit\nfrom skl2onnx.helpers import compare_objects\nimport onnxruntime as rt\nfrom onnxconverter_common.data_types import FloatTensorType\nfrom skl2onnx import convert_sklearn\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\n\nlogistic = LogisticRegression()\npca = PCA()\npipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])\n\ndigits = datasets.load_digits()\nX_digits = digits.data[:1000]\ny_digits = digits.target[:1000]\n\npipe.fit(X_digits, y_digits)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversion to ONNX\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "initial_types = [('input', FloatTensorType((None, X_digits.shape[1])))]\nmodel_onnx = convert_sklearn(pipe, initial_types=initial_types,\n target_opset=12)\n\nsess = rt.InferenceSession(model_onnx.SerializeToString(),\n providers=[\"CPUExecutionProvider\"])\nprint(\"skl predict_proba\")\nprint(pipe.predict_proba(X_digits[:2]))\nonx_pred = sess.run(None, {'input': X_digits[:2].astype(np.float32)})[1]\ndf = pd.DataFrame(onx_pred)\nprint(\"onnx predict_proba\")\nprint(df.values)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Comparing outputs\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "compare_objects(pipe.predict_proba(X_digits[:2]), onx_pred)\n# No exception so they are the same." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Benchmarks\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"scikit-learn\")\nprint(timeit(\"pipe.predict_proba(X_digits[:1])\",\n number=10000, globals=globals()))\nprint(\"onnxruntime\")\nprint(timeit(\"sess.run(None, {'input': X_digits[:1].astype(np.float32)})[1]\",\n number=10000, globals=globals()))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Intermediate steps\n\nLet's imagine the final output is wrong and we need\nto look into each component of the pipeline which one\nis failing. 
The following method modifies the scikit-learn\npipeline to steal the intermediate outputs and produces\nan smaller ONNX graph for every operator.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "steps = collect_intermediate_steps(\n pipe, \"pipeline\", initial_types)\n\nassert len(steps) == 2\n\npipe.predict_proba(X_digits[:2])\n\nfor i, step in enumerate(steps):\n onnx_step = step['onnx_step']\n sess = rt.InferenceSession(onnx_step.SerializeToString(),\n providers=[\"CPUExecutionProvider\"])\n onnx_outputs = sess.run(None, {'input': X_digits[:2].astype(np.float32)})\n skl_outputs = step['model']._debug.outputs\n if 'transform' in skl_outputs:\n compare_objects(skl_outputs['transform'], onnx_outputs[0])\n print(\"benchmark\", step['model'].__class__)\n print(\"scikit-learn\")\n print(timeit(\"step['model'].transform(X_digits[:1])\",\n number=10000, globals=globals()))\n else:\n compare_objects(skl_outputs['predict_proba'], onnx_outputs[1])\n print(\"benchmark\", step['model'].__class__)\n print(\"scikit-learn\")\n print(timeit(\"step['model'].predict_proba(X_digits[:1])\",\n number=10000, globals=globals()))\n print(\"onnxruntime\")\n print(timeit(\"sess.run(None, {'input': X_digits[:1].astype(np.float32)})\",\n number=10000, globals=globals()))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Versions used for this example**\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "print(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } \ No newline at end of file diff --git a/_images/binder_badge_logo.svg b/_images/binder_badge_logo.svg deleted file mode 100644 index 327f6b639..000000000 --- a/_images/binder_badge_logo.svg +++ /dev/null @@ -1 +0,0 @@ - launchlaunchbinderbinder \ No newline at end of file diff --git a/_images/binder_badge_logo1.svg b/_images/binder_badge_logo1.svg deleted file mode 100644 index 327f6b639..000000000 --- a/_images/binder_badge_logo1.svg +++ /dev/null @@ -1 +0,0 @@ - launchlaunchbinderbinder \ No newline at end of file diff --git a/_images/sphx_glr_plot_abegin_convert_pipeline_001.png b/_images/sphx_glr_plot_abegin_convert_pipeline_001.png index 341b3f64f..f77048ab6 100644 Binary files a/_images/sphx_glr_plot_abegin_convert_pipeline_001.png and b/_images/sphx_glr_plot_abegin_convert_pipeline_001.png differ diff --git a/_images/sphx_glr_plot_abegin_convert_pipeline_thumb.png b/_images/sphx_glr_plot_abegin_convert_pipeline_thumb.png index 3bea1d356..063a708ff 100644 Binary files a/_images/sphx_glr_plot_abegin_convert_pipeline_thumb.png and b/_images/sphx_glr_plot_abegin_convert_pipeline_thumb.png differ diff --git a/_images/sphx_glr_plot_bbegin_measure_time_001.png b/_images/sphx_glr_plot_bbegin_measure_time_001.png index 3c7d5e3fc..fadeba51e 100644 Binary files 
a/_images/sphx_glr_plot_bbegin_measure_time_001.png and b/_images/sphx_glr_plot_bbegin_measure_time_001.png differ diff --git a/_images/sphx_glr_plot_bbegin_measure_time_002.png b/_images/sphx_glr_plot_bbegin_measure_time_002.png index ab2833faa..52a8a26b3 100644 Binary files a/_images/sphx_glr_plot_bbegin_measure_time_002.png and b/_images/sphx_glr_plot_bbegin_measure_time_002.png differ diff --git a/_images/sphx_glr_plot_bbegin_measure_time_thumb.png b/_images/sphx_glr_plot_bbegin_measure_time_thumb.png index 95050552e..e85ecb188 100644 Binary files a/_images/sphx_glr_plot_bbegin_measure_time_thumb.png and b/_images/sphx_glr_plot_bbegin_measure_time_thumb.png differ diff --git a/_images/sphx_glr_plot_benchmark_cdist_001.png b/_images/sphx_glr_plot_benchmark_cdist_001.png index 6d3c432eb..cb85db4c8 100644 Binary files a/_images/sphx_glr_plot_benchmark_cdist_001.png and b/_images/sphx_glr_plot_benchmark_cdist_001.png differ diff --git a/_images/sphx_glr_plot_benchmark_cdist_thumb.png b/_images/sphx_glr_plot_benchmark_cdist_thumb.png index 36a736e40..7774fa581 100644 Binary files a/_images/sphx_glr_plot_benchmark_cdist_thumb.png and b/_images/sphx_glr_plot_benchmark_cdist_thumb.png differ diff --git a/_images/sphx_glr_plot_black_op_001.png b/_images/sphx_glr_plot_black_op_001.png index 0fd1c7670..df707f51b 100644 Binary files a/_images/sphx_glr_plot_black_op_001.png and b/_images/sphx_glr_plot_black_op_001.png differ diff --git a/_images/sphx_glr_plot_black_op_002.png b/_images/sphx_glr_plot_black_op_002.png index c0720a256..460e5711d 100644 Binary files a/_images/sphx_glr_plot_black_op_002.png and b/_images/sphx_glr_plot_black_op_002.png differ diff --git a/_images/sphx_glr_plot_black_op_thumb.png b/_images/sphx_glr_plot_black_op_thumb.png index 5f15f6d58..c7463d7a7 100644 Binary files a/_images/sphx_glr_plot_black_op_thumb.png and b/_images/sphx_glr_plot_black_op_thumb.png differ diff --git a/_images/sphx_glr_plot_cast_transformer_001.png b/_images/sphx_glr_plot_cast_transformer_001.png index c2f615898..fb42e8f88 100644 Binary files a/_images/sphx_glr_plot_cast_transformer_001.png and b/_images/sphx_glr_plot_cast_transformer_001.png differ diff --git a/_images/sphx_glr_plot_cast_transformer_002.png b/_images/sphx_glr_plot_cast_transformer_002.png index 6b46ca5d4..0efbb8d6e 100644 Binary files a/_images/sphx_glr_plot_cast_transformer_002.png and b/_images/sphx_glr_plot_cast_transformer_002.png differ diff --git a/_images/sphx_glr_plot_cast_transformer_thumb.png b/_images/sphx_glr_plot_cast_transformer_thumb.png index de0fef994..91a65b832 100644 Binary files a/_images/sphx_glr_plot_cast_transformer_thumb.png and b/_images/sphx_glr_plot_cast_transformer_thumb.png differ diff --git a/_images/sphx_glr_plot_catwoe_transformer_thumb.png b/_images/sphx_glr_plot_catwoe_transformer_thumb.png index 8a5fed589..b06c4e6a1 100644 Binary files a/_images/sphx_glr_plot_catwoe_transformer_thumb.png and b/_images/sphx_glr_plot_catwoe_transformer_thumb.png differ diff --git a/_images/sphx_glr_plot_cbegin_opset_001.png b/_images/sphx_glr_plot_cbegin_opset_001.png index a4f3d2e02..fb17d4c30 100644 Binary files a/_images/sphx_glr_plot_cbegin_opset_001.png and b/_images/sphx_glr_plot_cbegin_opset_001.png differ diff --git a/_images/sphx_glr_plot_cbegin_opset_thumb.png b/_images/sphx_glr_plot_cbegin_opset_thumb.png index 4c77a77d3..3628b1a4c 100644 Binary files a/_images/sphx_glr_plot_cbegin_opset_thumb.png and b/_images/sphx_glr_plot_cbegin_opset_thumb.png differ diff --git 
a/_images/sphx_glr_plot_complex_pipeline_001.png b/_images/sphx_glr_plot_complex_pipeline_001.png index 07c6d4afa..d2cd9dc6e 100644 Binary files a/_images/sphx_glr_plot_complex_pipeline_001.png and b/_images/sphx_glr_plot_complex_pipeline_001.png differ diff --git a/_images/sphx_glr_plot_complex_pipeline_thumb.png b/_images/sphx_glr_plot_complex_pipeline_thumb.png index 342cb4225..0149cbb4a 100644 Binary files a/_images/sphx_glr_plot_complex_pipeline_thumb.png and b/_images/sphx_glr_plot_complex_pipeline_thumb.png differ diff --git a/_images/sphx_glr_plot_convert_syntax_001.png b/_images/sphx_glr_plot_convert_syntax_001.png index ab7a7d8ef..414c4b1ec 100644 Binary files a/_images/sphx_glr_plot_convert_syntax_001.png and b/_images/sphx_glr_plot_convert_syntax_001.png differ diff --git a/_images/sphx_glr_plot_convert_syntax_thumb.png b/_images/sphx_glr_plot_convert_syntax_thumb.png index f6663e153..5eed52a62 100644 Binary files a/_images/sphx_glr_plot_convert_syntax_thumb.png and b/_images/sphx_glr_plot_convert_syntax_thumb.png differ diff --git a/_images/sphx_glr_plot_custom_model_001.png b/_images/sphx_glr_plot_custom_model_001.png index 2f3d5da76..8b3b1bb0d 100644 Binary files a/_images/sphx_glr_plot_custom_model_001.png and b/_images/sphx_glr_plot_custom_model_001.png differ diff --git a/_images/sphx_glr_plot_custom_model_002.png b/_images/sphx_glr_plot_custom_model_002.png index fe666407c..78ba3ee68 100644 Binary files a/_images/sphx_glr_plot_custom_model_002.png and b/_images/sphx_glr_plot_custom_model_002.png differ diff --git a/_images/sphx_glr_plot_custom_model_003.png b/_images/sphx_glr_plot_custom_model_003.png index 4a3930430..3de414e3b 100644 Binary files a/_images/sphx_glr_plot_custom_model_003.png and b/_images/sphx_glr_plot_custom_model_003.png differ diff --git a/_images/sphx_glr_plot_custom_model_004.png b/_images/sphx_glr_plot_custom_model_004.png index 643e9a1c8..08072b2ad 100644 Binary files a/_images/sphx_glr_plot_custom_model_004.png and b/_images/sphx_glr_plot_custom_model_004.png differ diff --git a/_images/sphx_glr_plot_custom_model_thumb.png b/_images/sphx_glr_plot_custom_model_thumb.png index ba25805e2..54cffd666 100644 Binary files a/_images/sphx_glr_plot_custom_model_thumb.png and b/_images/sphx_glr_plot_custom_model_thumb.png differ diff --git a/_images/sphx_glr_plot_custom_parser_001.png b/_images/sphx_glr_plot_custom_parser_001.png index e4bd39f37..278508e3f 100644 Binary files a/_images/sphx_glr_plot_custom_parser_001.png and b/_images/sphx_glr_plot_custom_parser_001.png differ diff --git a/_images/sphx_glr_plot_custom_parser_alternative_001.png b/_images/sphx_glr_plot_custom_parser_alternative_001.png index f822aa198..cd15f4fbc 100644 Binary files a/_images/sphx_glr_plot_custom_parser_alternative_001.png and b/_images/sphx_glr_plot_custom_parser_alternative_001.png differ diff --git a/_images/sphx_glr_plot_custom_parser_alternative_thumb.png b/_images/sphx_glr_plot_custom_parser_alternative_thumb.png index bfec0f45c..057fa127d 100644 Binary files a/_images/sphx_glr_plot_custom_parser_alternative_thumb.png and b/_images/sphx_glr_plot_custom_parser_alternative_thumb.png differ diff --git a/_images/sphx_glr_plot_custom_parser_thumb.png b/_images/sphx_glr_plot_custom_parser_thumb.png index 6d70220d7..4396d47ed 100644 Binary files a/_images/sphx_glr_plot_custom_parser_thumb.png and b/_images/sphx_glr_plot_custom_parser_thumb.png differ diff --git a/_images/sphx_glr_plot_dbegin_options_001.png b/_images/sphx_glr_plot_dbegin_options_001.png index 
5ad73186d..9fbf35880 100644 Binary files a/_images/sphx_glr_plot_dbegin_options_001.png and b/_images/sphx_glr_plot_dbegin_options_001.png differ diff --git a/_images/sphx_glr_plot_dbegin_options_002.png b/_images/sphx_glr_plot_dbegin_options_002.png index 0bbc6fcc3..a42d9fe7b 100644 Binary files a/_images/sphx_glr_plot_dbegin_options_002.png and b/_images/sphx_glr_plot_dbegin_options_002.png differ diff --git a/_images/sphx_glr_plot_dbegin_options_003.png b/_images/sphx_glr_plot_dbegin_options_003.png index 5ad73186d..9fbf35880 100644 Binary files a/_images/sphx_glr_plot_dbegin_options_003.png and b/_images/sphx_glr_plot_dbegin_options_003.png differ diff --git a/_images/sphx_glr_plot_dbegin_options_004.png b/_images/sphx_glr_plot_dbegin_options_004.png index 04aeaeaa4..72bbffb67 100644 Binary files a/_images/sphx_glr_plot_dbegin_options_004.png and b/_images/sphx_glr_plot_dbegin_options_004.png differ diff --git a/_images/sphx_glr_plot_dbegin_options_list_001.png b/_images/sphx_glr_plot_dbegin_options_list_001.png index cea9d74ed..37fb73eef 100644 Binary files a/_images/sphx_glr_plot_dbegin_options_list_001.png and b/_images/sphx_glr_plot_dbegin_options_list_001.png differ diff --git a/_images/sphx_glr_plot_dbegin_options_list_002.png b/_images/sphx_glr_plot_dbegin_options_list_002.png index 6cffe9b09..bb6433cbf 100644 Binary files a/_images/sphx_glr_plot_dbegin_options_list_002.png and b/_images/sphx_glr_plot_dbegin_options_list_002.png differ diff --git a/_images/sphx_glr_plot_dbegin_options_list_thumb.png b/_images/sphx_glr_plot_dbegin_options_list_thumb.png index 559666b9d..1ea23bd7b 100644 Binary files a/_images/sphx_glr_plot_dbegin_options_list_thumb.png and b/_images/sphx_glr_plot_dbegin_options_list_thumb.png differ diff --git a/_images/sphx_glr_plot_dbegin_options_thumb.png b/_images/sphx_glr_plot_dbegin_options_thumb.png index aa262806e..1b4508161 100644 Binary files a/_images/sphx_glr_plot_dbegin_options_thumb.png and b/_images/sphx_glr_plot_dbegin_options_thumb.png differ diff --git a/_images/sphx_glr_plot_ebegin_float_double_001.png b/_images/sphx_glr_plot_ebegin_float_double_001.png index af2ef7eea..40563d99b 100644 Binary files a/_images/sphx_glr_plot_ebegin_float_double_001.png and b/_images/sphx_glr_plot_ebegin_float_double_001.png differ diff --git a/_images/sphx_glr_plot_ebegin_float_double_thumb.png b/_images/sphx_glr_plot_ebegin_float_double_thumb.png index b119247cb..978cf0abd 100644 Binary files a/_images/sphx_glr_plot_ebegin_float_double_thumb.png and b/_images/sphx_glr_plot_ebegin_float_double_thumb.png differ diff --git a/_images/sphx_glr_plot_fbegin_investigate_001.png b/_images/sphx_glr_plot_fbegin_investigate_001.png index 72e4d7e2b..273df7527 100644 Binary files a/_images/sphx_glr_plot_fbegin_investigate_001.png and b/_images/sphx_glr_plot_fbegin_investigate_001.png differ diff --git a/_images/sphx_glr_plot_fbegin_investigate_thumb.png b/_images/sphx_glr_plot_fbegin_investigate_thumb.png index 5b3fad2b2..f4e763497 100644 Binary files a/_images/sphx_glr_plot_fbegin_investigate_thumb.png and b/_images/sphx_glr_plot_fbegin_investigate_thumb.png differ diff --git a/_images/sphx_glr_plot_gbegin_dataframe_001.png b/_images/sphx_glr_plot_gbegin_dataframe_001.png index f47155937..3f1ac876e 100644 Binary files a/_images/sphx_glr_plot_gbegin_dataframe_001.png and b/_images/sphx_glr_plot_gbegin_dataframe_001.png differ diff --git a/_images/sphx_glr_plot_gbegin_dataframe_002.png b/_images/sphx_glr_plot_gbegin_dataframe_002.png index 021e01666..1a2e080f9 100644 Binary 
files a/_images/sphx_glr_plot_gbegin_dataframe_002.png and b/_images/sphx_glr_plot_gbegin_dataframe_002.png differ diff --git a/_images/sphx_glr_plot_gbegin_dataframe_thumb.png b/_images/sphx_glr_plot_gbegin_dataframe_thumb.png index bed6354b1..ed4070710 100644 Binary files a/_images/sphx_glr_plot_gbegin_dataframe_thumb.png and b/_images/sphx_glr_plot_gbegin_dataframe_thumb.png differ diff --git a/_images/sphx_glr_plot_gbegin_transfer_learning_001.png b/_images/sphx_glr_plot_gbegin_transfer_learning_001.png index 751597fec..c26be50d1 100644 Binary files a/_images/sphx_glr_plot_gbegin_transfer_learning_001.png and b/_images/sphx_glr_plot_gbegin_transfer_learning_001.png differ diff --git a/_images/sphx_glr_plot_gbegin_transfer_learning_002.png b/_images/sphx_glr_plot_gbegin_transfer_learning_002.png index a547335cf..a7d290de0 100644 Binary files a/_images/sphx_glr_plot_gbegin_transfer_learning_002.png and b/_images/sphx_glr_plot_gbegin_transfer_learning_002.png differ diff --git a/_images/sphx_glr_plot_gbegin_transfer_learning_003.png b/_images/sphx_glr_plot_gbegin_transfer_learning_003.png index beaecb9ac..890287f67 100644 Binary files a/_images/sphx_glr_plot_gbegin_transfer_learning_003.png and b/_images/sphx_glr_plot_gbegin_transfer_learning_003.png differ diff --git a/_images/sphx_glr_plot_gexternal_catboost_001.png b/_images/sphx_glr_plot_gexternal_catboost_001.png new file mode 100644 index 000000000..d4b269f28 Binary files /dev/null and b/_images/sphx_glr_plot_gexternal_catboost_001.png differ diff --git a/_images/sphx_glr_plot_gexternal_catboost_thumb.png b/_images/sphx_glr_plot_gexternal_catboost_thumb.png new file mode 100644 index 000000000..c17b51d79 Binary files /dev/null and b/_images/sphx_glr_plot_gexternal_catboost_thumb.png differ diff --git a/_images/sphx_glr_plot_gexternal_lightgbm_001.png b/_images/sphx_glr_plot_gexternal_lightgbm_001.png index 5833b44ae..ac2135e5d 100644 Binary files a/_images/sphx_glr_plot_gexternal_lightgbm_001.png and b/_images/sphx_glr_plot_gexternal_lightgbm_001.png differ diff --git a/_images/sphx_glr_plot_gexternal_lightgbm_reg_001.png b/_images/sphx_glr_plot_gexternal_lightgbm_reg_001.png index 8cf27e87f..e36a5a04f 100644 Binary files a/_images/sphx_glr_plot_gexternal_lightgbm_reg_001.png and b/_images/sphx_glr_plot_gexternal_lightgbm_reg_001.png differ diff --git a/_images/sphx_glr_plot_gexternal_lightgbm_reg_thumb.png b/_images/sphx_glr_plot_gexternal_lightgbm_reg_thumb.png index 17533b8e3..2e7e10d43 100644 Binary files a/_images/sphx_glr_plot_gexternal_lightgbm_reg_thumb.png and b/_images/sphx_glr_plot_gexternal_lightgbm_reg_thumb.png differ diff --git a/_images/sphx_glr_plot_gexternal_lightgbm_thumb.png b/_images/sphx_glr_plot_gexternal_lightgbm_thumb.png index b258ccc91..41f66ce2a 100644 Binary files a/_images/sphx_glr_plot_gexternal_lightgbm_thumb.png and b/_images/sphx_glr_plot_gexternal_lightgbm_thumb.png differ diff --git a/_images/sphx_glr_plot_gexternal_xgboost_001.png b/_images/sphx_glr_plot_gexternal_xgboost_001.png index fdc9acc68..7cee500f9 100644 Binary files a/_images/sphx_glr_plot_gexternal_xgboost_001.png and b/_images/sphx_glr_plot_gexternal_xgboost_001.png differ diff --git a/_images/sphx_glr_plot_gexternal_xgboost_thumb.png b/_images/sphx_glr_plot_gexternal_xgboost_thumb.png index 86d2f05ef..ac8961f83 100644 Binary files a/_images/sphx_glr_plot_gexternal_xgboost_thumb.png and b/_images/sphx_glr_plot_gexternal_xgboost_thumb.png differ diff --git a/_images/sphx_glr_plot_icustom_converter_001.png 
b/_images/sphx_glr_plot_icustom_converter_001.png index bf94111b0..5b85be8d0 100644 Binary files a/_images/sphx_glr_plot_icustom_converter_001.png and b/_images/sphx_glr_plot_icustom_converter_001.png differ diff --git a/_images/sphx_glr_plot_icustom_converter_thumb.png b/_images/sphx_glr_plot_icustom_converter_thumb.png index 55538cd32..86cb58638 100644 Binary files a/_images/sphx_glr_plot_icustom_converter_thumb.png and b/_images/sphx_glr_plot_icustom_converter_thumb.png differ diff --git a/_images/sphx_glr_plot_intermediate_outputs_001.png b/_images/sphx_glr_plot_intermediate_outputs_001.png index c6675f226..ed0544479 100644 Binary files a/_images/sphx_glr_plot_intermediate_outputs_001.png and b/_images/sphx_glr_plot_intermediate_outputs_001.png differ diff --git a/_images/sphx_glr_plot_intermediate_outputs_002.png b/_images/sphx_glr_plot_intermediate_outputs_002.png index ab7b5371d..56fcd43ce 100644 Binary files a/_images/sphx_glr_plot_intermediate_outputs_002.png and b/_images/sphx_glr_plot_intermediate_outputs_002.png differ diff --git a/_images/sphx_glr_plot_intermediate_outputs_thumb.png b/_images/sphx_glr_plot_intermediate_outputs_thumb.png index 521b8e9b2..34710b840 100644 Binary files a/_images/sphx_glr_plot_intermediate_outputs_thumb.png and b/_images/sphx_glr_plot_intermediate_outputs_thumb.png differ diff --git a/_images/sphx_glr_plot_kcustom_converter_wrapper_001.png b/_images/sphx_glr_plot_kcustom_converter_wrapper_001.png index b9845df19..1737acf9a 100644 Binary files a/_images/sphx_glr_plot_kcustom_converter_wrapper_001.png and b/_images/sphx_glr_plot_kcustom_converter_wrapper_001.png differ diff --git a/_images/sphx_glr_plot_kcustom_converter_wrapper_thumb.png b/_images/sphx_glr_plot_kcustom_converter_wrapper_thumb.png index 147fb230d..d53f24068 100644 Binary files a/_images/sphx_glr_plot_kcustom_converter_wrapper_thumb.png and b/_images/sphx_glr_plot_kcustom_converter_wrapper_thumb.png differ diff --git a/_images/sphx_glr_plot_lcustom_options_001.png b/_images/sphx_glr_plot_lcustom_options_001.png index 734526693..25aa0c54f 100644 Binary files a/_images/sphx_glr_plot_lcustom_options_001.png and b/_images/sphx_glr_plot_lcustom_options_001.png differ diff --git a/_images/sphx_glr_plot_lcustom_options_thumb.png b/_images/sphx_glr_plot_lcustom_options_thumb.png index 180c5ca68..bd3357d11 100644 Binary files a/_images/sphx_glr_plot_lcustom_options_thumb.png and b/_images/sphx_glr_plot_lcustom_options_thumb.png differ diff --git a/_images/sphx_glr_plot_mcustom_parser_001.png b/_images/sphx_glr_plot_mcustom_parser_001.png index efd17f378..c6dac344d 100644 Binary files a/_images/sphx_glr_plot_mcustom_parser_001.png and b/_images/sphx_glr_plot_mcustom_parser_001.png differ diff --git a/_images/sphx_glr_plot_mcustom_parser_thumb.png b/_images/sphx_glr_plot_mcustom_parser_thumb.png index 7631bf831..3ce5bafdf 100644 Binary files a/_images/sphx_glr_plot_mcustom_parser_thumb.png and b/_images/sphx_glr_plot_mcustom_parser_thumb.png differ diff --git a/_images/sphx_glr_plot_ngrams_thumb.png b/_images/sphx_glr_plot_ngrams_thumb.png new file mode 100644 index 000000000..8a5fed589 Binary files /dev/null and b/_images/sphx_glr_plot_ngrams_thumb.png differ diff --git a/_images/sphx_glr_plot_nmf_001.png b/_images/sphx_glr_plot_nmf_001.png index 7bcc59183..ab2bc7b78 100644 Binary files a/_images/sphx_glr_plot_nmf_001.png and b/_images/sphx_glr_plot_nmf_001.png differ diff --git a/_images/sphx_glr_plot_nmf_thumb.png b/_images/sphx_glr_plot_nmf_thumb.png index ee1a7d6ae..384dea470 100644 Binary 
files a/_images/sphx_glr_plot_nmf_thumb.png and b/_images/sphx_glr_plot_nmf_thumb.png differ diff --git a/_images/sphx_glr_plot_onnx_operators_001.png b/_images/sphx_glr_plot_onnx_operators_001.png index 6f45baae3..0d1486c8e 100644 Binary files a/_images/sphx_glr_plot_onnx_operators_001.png and b/_images/sphx_glr_plot_onnx_operators_001.png differ diff --git a/_images/sphx_glr_plot_onnx_operators_thumb.png b/_images/sphx_glr_plot_onnx_operators_thumb.png index ff9f923a1..09e6d8373 100644 Binary files a/_images/sphx_glr_plot_onnx_operators_thumb.png and b/_images/sphx_glr_plot_onnx_operators_thumb.png differ diff --git a/_images/sphx_glr_plot_pextend_python_runtime_001.png b/_images/sphx_glr_plot_pextend_python_runtime_001.png index 4d31ba384..4e2148c84 100644 Binary files a/_images/sphx_glr_plot_pextend_python_runtime_001.png and b/_images/sphx_glr_plot_pextend_python_runtime_001.png differ diff --git a/_images/sphx_glr_plot_pextend_python_runtime_thumb.png b/_images/sphx_glr_plot_pextend_python_runtime_thumb.png index cd88d320d..8559e75c3 100644 Binary files a/_images/sphx_glr_plot_pextend_python_runtime_thumb.png and b/_images/sphx_glr_plot_pextend_python_runtime_thumb.png differ diff --git a/_images/sphx_glr_plot_pipeline_001.png b/_images/sphx_glr_plot_pipeline_001.png index eac207e52..82f02e839 100644 Binary files a/_images/sphx_glr_plot_pipeline_001.png and b/_images/sphx_glr_plot_pipeline_001.png differ diff --git a/_images/sphx_glr_plot_pipeline_lightgbm_001.png b/_images/sphx_glr_plot_pipeline_lightgbm_001.png index 3c95e8ba7..ba37cbe74 100644 Binary files a/_images/sphx_glr_plot_pipeline_lightgbm_001.png and b/_images/sphx_glr_plot_pipeline_lightgbm_001.png differ diff --git a/_images/sphx_glr_plot_pipeline_lightgbm_thumb.png b/_images/sphx_glr_plot_pipeline_lightgbm_thumb.png index c241bb36e..7c826eac1 100644 Binary files a/_images/sphx_glr_plot_pipeline_lightgbm_thumb.png and b/_images/sphx_glr_plot_pipeline_lightgbm_thumb.png differ diff --git a/_images/sphx_glr_plot_pipeline_thumb.png b/_images/sphx_glr_plot_pipeline_thumb.png index 108145d6e..b2a5289db 100644 Binary files a/_images/sphx_glr_plot_pipeline_thumb.png and b/_images/sphx_glr_plot_pipeline_thumb.png differ diff --git a/_images/sphx_glr_plot_pipeline_xgboost_001.png b/_images/sphx_glr_plot_pipeline_xgboost_001.png index 963b8f713..bb0f81769 100644 Binary files a/_images/sphx_glr_plot_pipeline_xgboost_001.png and b/_images/sphx_glr_plot_pipeline_xgboost_001.png differ diff --git a/_images/sphx_glr_plot_pipeline_xgboost_thumb.png b/_images/sphx_glr_plot_pipeline_xgboost_thumb.png index fd8b8a476..1873e089f 100644 Binary files a/_images/sphx_glr_plot_pipeline_xgboost_thumb.png and b/_images/sphx_glr_plot_pipeline_xgboost_thumb.png differ diff --git a/_images/sphx_glr_plot_tfidfvectorizer_001.png b/_images/sphx_glr_plot_tfidfvectorizer_001.png index be5e9b17d..cad0249c0 100644 Binary files a/_images/sphx_glr_plot_tfidfvectorizer_001.png and b/_images/sphx_glr_plot_tfidfvectorizer_001.png differ diff --git a/_images/sphx_glr_plot_tfidfvectorizer_thumb.png b/_images/sphx_glr_plot_tfidfvectorizer_thumb.png index 4c5c4fccd..b5448d56b 100644 Binary files a/_images/sphx_glr_plot_tfidfvectorizer_thumb.png and b/_images/sphx_glr_plot_tfidfvectorizer_thumb.png differ diff --git a/_images/sphx_glr_plot_transformer_discrepancy_thumb.png b/_images/sphx_glr_plot_transformer_discrepancy_thumb.png new file mode 100644 index 000000000..8a5fed589 Binary files /dev/null and b/_images/sphx_glr_plot_transformer_discrepancy_thumb.png 
differ diff --git a/_images/sphx_glr_plot_woe_transformer_001.png b/_images/sphx_glr_plot_woe_transformer_001.png index 1e1bbfad2..b3df65654 100644 Binary files a/_images/sphx_glr_plot_woe_transformer_001.png and b/_images/sphx_glr_plot_woe_transformer_001.png differ diff --git a/_images/sphx_glr_plot_woe_transformer_002.png b/_images/sphx_glr_plot_woe_transformer_002.png index d4fd90c4d..e50ff5843 100644 Binary files a/_images/sphx_glr_plot_woe_transformer_002.png and b/_images/sphx_glr_plot_woe_transformer_002.png differ diff --git a/_images/sphx_glr_plot_woe_transformer_thumb.png b/_images/sphx_glr_plot_woe_transformer_thumb.png index 84b7a3a79..98a2f5b96 100644 Binary files a/_images/sphx_glr_plot_woe_transformer_thumb.png and b/_images/sphx_glr_plot_woe_transformer_thumb.png differ diff --git a/_modules/index.html b/_modules/index.html index 81e133134..719803825 100644 --- a/_modules/index.html +++ b/_modules/index.html @@ -1,241 +1,331 @@ - - - - - - - - Overview: module code — sklearn-onnx 1.11.2 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- + + + + + + + + Overview: module code - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/onnxconverter_common/utils.html b/_modules/onnxconverter_common/utils.html index f5ca9fcf6..9c18ea6d9 100644 --- a/_modules/onnxconverter_common/utils.html +++ b/_modules/onnxconverter_common/utils.html @@ -1,561 +1,649 @@ - - - - - - - - onnxconverter_common.utils — sklearn-onnx 1.11.2 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -
- - -
- - - - - -
- -
- - - - -
- -
- -

Source code for onnxconverter_common.utils

-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-###############################################################################
-
-import numbers
-import numpy as np
-import warnings
-from distutils.version import LooseVersion
-
-
-def sparkml_installed():
-    """
-    Checks that *spark* is available.
-    """
-    try:
-        import pyspark  # noqa F401
-        return True
-    except ImportError:
-        return False
-
-
-def sklearn_installed():
-    """
-    Checks that *scikit-learn* is available.
-    """
-    try:
-        import sklearn  # noqa F401
-        return True
-    except ImportError:
-        return False
-
-
-def skl2onnx_installed():
-    """
-    Checks that *skl2onnx* converter is available.
-    """
-    try:
-        import skl2onnx  # noqa F401
-        return True
-    except ImportError:
-        return False
-
-
-def coreml_installed():
-    """
-    Checks that *coremltools* is available.
-    """
-    try:
-        import coremltools  # noqa F401
-        return True
-    except ImportError:
-        return False
-
-
-def keras2onnx_installed():
-    """
-    Checks that *keras2onnx* is available.
-    """
-    try:
-        import keras2onnx  # noqa F401
-        return True
-    except ImportError:
-        return False
-
-
-def torch_installed():
-    """
-    Checks that *pytorch* is available.
-    """
-    try:
-        import torch  # noqa F401
-        return True
-    except ImportError:
-        return False
-
-
-def caffe2_installed():
-    """
-    Checks that *caffe* is available.
-    """
-    try:
-        import caffe2  # noqa F401
-        return True
-    except ImportError:
-        return False
-
-
-def libsvm_installed():
-    """
-    Checks that *libsvm* is available.
-    """
-    try:
-        import svm  # noqa F401
-        import svmutil  # noqa F401
-        return True
-    except ImportError:
-        return False
-
-
-def lightgbm_installed():
-    """
-    Checks that *lightgbm* is available.
-    """
-    try:
-        import lightgbm  # noqa F401
-        return True
-    except ImportError:
-        return False
-
-
-def xgboost_installed():
-    """
-    Checks that *xgboost* is available.
-    """
-    try:
-        import xgboost  # noqa F401
-    except ImportError:
-        return False
-    from xgboost.core import _LIB
-    try:
-        _LIB.XGBoosterDumpModelEx
-    except AttributeError:
-        # The version is not recent enough even though it is version 0.6.
-        # You need to install xgboost from github and not from pypi.
-        return False
-    from xgboost import __version__
-    vers = LooseVersion(__version__)
-    allowed = LooseVersion('0.7')
-    if vers < allowed:
-        warnings.warn('The converter works for xgboost >= 0.7. Earlier versions might not.')
-    return True
-
-
-def h2o_installed():
-    """
-    Checks that *h2o* is available.
-    """
-    try:
-        import h2o  # noqa F401
-    except ImportError:
-        return False
-    return True
-
-
-def hummingbird_installed():
-    """
-    Checks that *Hummingbird* is available.
-    """
-    try:
-        import hummingbird.ml  # noqa: F401
-
-        return True
-    except ImportError:
-        return False
-
-
-def get_producer():
-    """
-    Internal helper function to return the producer
-    """
-    from . import __producer__
-    return __producer__
-
-
-def get_producer_version():
-    """
-    Internal helper function to return the producer version
-    """
-    from . import __producer_version__
-    return __producer_version__
-
-
-def get_domain():
-    """
-    Internal helper function to return the model domain
-    """
-    from . import __domain__
-    return __domain__
-
-
-def get_model_version():
-    """
-    Internal helper function to return the model version
-    """
-    from . import __model_version__
-    return __model_version__
-
-
-def is_numeric_type(item):
-    numeric_types = (int, float, complex)
-    types = numeric_types
-
-    if isinstance(item, list):
-        return all(isinstance(i, types) for i in item)
-    if isinstance(item, np.ndarray):
-        return np.issubdtype(item.dtype, np.number)
-    return isinstance(item, types)
-
-
-def is_string_type(item):
-    if isinstance(item, list):
-        return all(isinstance(i, str) for i in item)
-    if isinstance(item, np.ndarray):
-        return np.issubdtype(item.dtype, np.str_)
-    return isinstance(item, str)
-
-
-def cast_list(type, items):
-    return [type(item) for item in items]
-
-
-def convert_to_python_value(var):
-    if isinstance(var, numbers.Integral):
-        return int(var)
-    elif isinstance(var, numbers.Real):
-        return float(var)
-    elif isinstance(var, str):
-        return str(var)
-    else:
-        raise TypeError('Unable to convert {0} to python type'.format(type(var)))
-
-
-def convert_to_python_default_value(var):
-    if isinstance(var, numbers.Integral):
-        return int()
-    elif isinstance(var, numbers.Real):
-        return float()
-    elif isinstance(var, str):
-        return str()
-    else:
-        raise TypeError('Unable to find default python value for type {0}'.format(type(var)))
-
-
-def convert_to_list(var):
-    if isinstance(var, numbers.Real) or isinstance(var, str):
-        return [convert_to_python_value(var)]
-    elif isinstance(var, np.ndarray) and len(var.shape) == 1:
-        return [convert_to_python_value(v) for v in var]
-    elif isinstance(var, list):
-        flattened = []
-        if all(isinstance(ele, np.ndarray) and len(ele.shape) == 1 for ele in var):
-            max_classes = max([ele.shape[0] for ele in var])
-            flattened_one = []
-            for ele in var:
-                for i in range(max_classes):
-                    if i < ele.shape[0]:
-                        flattened_one.append(convert_to_python_value(ele[i]))
-                    else:
-                        flattened_one.append(convert_to_python_default_value(ele[0]))
-            flattened += flattened_one
-            return flattened
-        elif all(isinstance(v, numbers.Real) or isinstance(v, str) for v in var):
-            return [convert_to_python_value(v) for v in var]
-        else:
-            raise TypeError('Unable to flatten variable')
-    else:
-        raise TypeError('Unable to flatten variable')
-
-
-
[docs]def check_input_and_output_numbers(operator, input_count_range=None, output_count_range=None): - ''' - Check if the number of input(s)/output(s) is correct - - :param operator: A Operator object - :param input_count_range: A list of two integers or an integer. If it's a list the first/second element is the - minimal/maximal number of inputs. If it's an integer, it is equivalent to specify that number twice in a list. For - infinite ranges like 5 to infinity, you need to use [5, None]. - :param output_count_range: A list of two integers or an integer. See input_count_range for its format. - ''' - if isinstance(input_count_range, list): - min_input_count = input_count_range[0] - max_input_count = input_count_range[1] - elif isinstance(input_count_range, int) or input_count_range is None: - min_input_count = input_count_range - max_input_count = input_count_range - else: - raise RuntimeError('input_count_range must be a list or an integer') - - if isinstance(output_count_range, list): - min_output_count = output_count_range[0] - max_output_count = output_count_range[1] - elif isinstance(output_count_range, int) or output_count_range is None: - min_output_count = output_count_range - max_output_count = output_count_range - else: - raise RuntimeError('output_count_range must be a list or an integer') - - if min_input_count is not None and len(operator.inputs) < min_input_count: - raise RuntimeError( - 'For operator %s (type: %s), at least %s input(s) is(are) required but we got %s input(s) which are %s' - % (operator.full_name, operator.type, min_input_count, len(operator.inputs), operator.input_full_names)) - - if max_input_count is not None and len(operator.inputs) > max_input_count: - raise RuntimeError( - 'For operator %s (type: %s), at most %s input(s) is(are) supported but we got %s input(s) which are %s' - % (operator.full_name, operator.type, max_input_count, len(operator.inputs), operator.input_full_names)) - - if min_output_count is not None and len(operator.outputs) < min_output_count: - raise RuntimeError( - 'For operator %s (type: %s), at least %s output(s) is(are) produced but we got %s output(s) which are %s' - % (operator.full_name, operator.type, min_output_count, len(operator.outputs), operator.output_full_names)) - - if max_output_count is not None and len(operator.outputs) > max_output_count: - raise RuntimeError( - 'For operator %s (type: %s), at most %s outputs(s) is(are) supported but we got %s output(s) which are %s' - % (operator.full_name, operator.type, max_output_count, len(operator.outputs), operator.output_full_names))
- - -
[docs]def check_input_and_output_types(operator, good_input_types=None, good_output_types=None): - ''' - Check if the type(s) of input(s)/output(s) is(are) correct - - :param operator: A Operator object - :param good_input_types: A list of allowed input types (e.g., [FloatTensorType, Int64TensorType]) or None. None - means that we skip the check of the input types. - :param good_output_types: A list of allowed output types. See good_input_types for its format. - ''' - if good_input_types is not None: - for variable in operator.inputs: - if type(variable.type) not in good_input_types: - raise RuntimeError('Operator %s (type: %s) got an input %s with a wrong type %s. Only %s are allowed' - % (operator.full_name, operator.type, variable.full_name, type(variable.type), - good_input_types)) - - if good_output_types is not None: - for variable in operator.outputs: - if type(variable.type) not in good_output_types: - raise RuntimeError('Operator %s (type: %s) got an output %s with a wrong type %s. Only %s are allowed' - % (operator.full_name, operator.type, variable.full_name, type(variable.type), - good_output_types))
-
- -
- - - -
- -
-
-
- -
- -
-
- - - - - - -
-
- + + + + + + + + onnxconverter_common.utils - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for onnxconverter_common.utils

+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+###############################################################################
+
+import numbers
+import numpy as np
+import warnings
+import packaging.version as pv
+
+
+def sparkml_installed():
+    """
+    Checks that *spark* is available.
+    """
+    try:
+        import pyspark  # noqa F401
+        return True
+    except ImportError:
+        return False
+
+
+def sklearn_installed():
+    """
+    Checks that *scikit-learn* is available.
+    """
+    try:
+        import sklearn  # noqa F401
+        return True
+    except ImportError:
+        return False
+
+
+def skl2onnx_installed():
+    """
+    Checks that *skl2onnx* converter is available.
+    """
+    try:
+        import skl2onnx  # noqa F401
+        return True
+    except ImportError:
+        return False
+
+
+def coreml_installed():
+    """
+    Checks that *coremltools* is available.
+    """
+    try:
+        import coremltools  # noqa F401
+        return True
+    except ImportError:
+        return False
+
+
+def keras2onnx_installed():
+    """
+    Checks that *keras2onnx* is available.
+    """
+    try:
+        import keras2onnx  # noqa F401
+        return True
+    except ImportError:
+        return False
+
+
+def torch_installed():
+    """
+    Checks that *pytorch* is available.
+    """
+    try:
+        import torch  # noqa F401
+        return True
+    except ImportError:
+        return False
+
+
+def caffe2_installed():
+    """
+    Checks that *caffe2* is available.
+    """
+    try:
+        import caffe2  # noqa F401
+        return True
+    except ImportError:
+        return False
+
+
+def libsvm_installed():
+    """
+    Checks that *libsvm* is available.
+    """
+    try:
+        import svm  # noqa F401
+        import svmutil  # noqa F401
+        return True
+    except ImportError:
+        return False
+
+
+def lightgbm_installed():
+    """
+    Checks that *lightgbm* is available.
+    """
+    try:
+        import lightgbm  # noqa F401
+        return True
+    except ImportError:
+        return False
+
+
+def xgboost_installed():
+    """
+    Checks that *xgboost* is available.
+    """
+    try:
+        import xgboost  # noqa F401
+    except ImportError:
+        return False
+    from xgboost.core import _LIB
+    try:
+        _LIB.XGBoosterDumpModelEx
+    except AttributeError:
+        # The installed version is not recent enough even though it reports 0.6.
+        # You need to install xgboost from GitHub, not from PyPI.
+        return False
+    from xgboost import __version__
+    vers = pv.Version(__version__)
+    allowed = pv.Version('0.7')
+    if vers < allowed:
+        warnings.warn('The converter works for xgboost >= 0.7. Earlier versions might not.')
+    return True
+
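# Illustration (not part of this module): packaging.version, which replaces the
# deprecated distutils LooseVersion here, compares release segments numerically:
#     pv.Version("0.90") < pv.Version("1.7.1")   # True
#     pv.Version("1.10") > pv.Version("1.9")     # True, while plain string comparison says the opposite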
+
+def h2o_installed():
+    """
+    Checks that *h2o* is available.
+    """
+    try:
+        import h2o  # noqa F401
+    except ImportError:
+        return False
+    return True
+
+
+def hummingbird_installed():
+    """
+    Checks that *Hummingbird* is available.
+    """
+    try:
+        import hummingbird.ml  # noqa: F401
+
+        return True
+    except ImportError:
+        return False
+
+
+def get_producer():
+    """
+    Internal helper function to return the producer
+    """
+    from . import __producer__
+    return __producer__
+
+
+def get_producer_version():
+    """
+    Internal helper function to return the producer version
+    """
+    from . import __producer_version__
+    return __producer_version__
+
+
+def get_domain():
+    """
+    Internal helper function to return the model domain
+    """
+    from . import __domain__
+    return __domain__
+
+
+def get_model_version():
+    """
+    Internal helper function to return the model version
+    """
+    from . import __model_version__
+    return __model_version__
+
+
+def is_numeric_type(item):
+    numeric_types = (int, float, complex)
+    types = numeric_types
+
+    if isinstance(item, list):
+        return all(isinstance(i, types) for i in item)
+    if isinstance(item, np.ndarray):
+        return np.issubdtype(item.dtype, np.number)
+    return isinstance(item, types)
+
+
+def is_string_type(item):
+    if isinstance(item, list):
+        return all(isinstance(i, str) for i in item)
+    if isinstance(item, np.ndarray):
+        return np.issubdtype(item.dtype, np.str_)
+    return isinstance(item, str)
+
+
+def cast_list(type, items):
+    return [type(item) for item in items]
+
+
+def convert_to_python_value(var):
+    if isinstance(var, numbers.Integral):
+        return int(var)
+    elif isinstance(var, numbers.Real):
+        return float(var)
+    elif isinstance(var, str):
+        return str(var)
+    else:
+        raise TypeError('Unable to convert {0} to python type'.format(type(var)))
+
+
+def convert_to_python_default_value(var):
+    if isinstance(var, numbers.Integral):
+        return int()
+    elif isinstance(var, numbers.Real):
+        return float()
+    elif isinstance(var, str):
+        return str()
+    else:
+        raise TypeError('Unable to find default python value for type {0}'.format(type(var)))
+
+
+def convert_to_list(var):
+    if isinstance(var, numbers.Real) or isinstance(var, str):
+        return [convert_to_python_value(var)]
+    elif isinstance(var, np.ndarray) and len(var.shape) == 1:
+        return [convert_to_python_value(v) for v in var]
+    elif isinstance(var, list):
+        flattened = []
+        if all(isinstance(ele, np.ndarray) and len(ele.shape) == 1 for ele in var):
+            max_classes = max([ele.shape[0] for ele in var])
+            flattened_one = []
+            for ele in var:
+                for i in range(max_classes):
+                    if i < ele.shape[0]:
+                        flattened_one.append(convert_to_python_value(ele[i]))
+                    else:
+                        flattened_one.append(convert_to_python_default_value(ele[0]))
+            flattened += flattened_one
+            return flattened
+        elif all(isinstance(v, numbers.Real) or isinstance(v, str) for v in var):
+            return [convert_to_python_value(v) for v in var]
+        else:
+            raise TypeError('Unable to flatten variable')
+    else:
+        raise TypeError('Unable to flatten variable')
+
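# Illustration (not part of this module): convert_to_list flattens a ragged list of
# 1-D arrays and right-pads each one with the type's default value, e.g.
#     convert_to_list([np.array([0.2, 0.8]), np.array([1.0])])  ->  [0.2, 0.8, 1.0, 0.0]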
+
+
[docs]def check_input_and_output_numbers(operator, input_count_range=None, output_count_range=None): + ''' + Check if the number of input(s)/output(s) is correct + + :param operator: A Operator object + :param input_count_range: A list of two integers or an integer. If it's a list the first/second element is the + minimal/maximal number of inputs. If it's an integer, it is equivalent to specify that number twice in a list. For + infinite ranges like 5 to infinity, you need to use [5, None]. + :param output_count_range: A list of two integers or an integer. See input_count_range for its format. + ''' + if isinstance(input_count_range, list): + min_input_count = input_count_range[0] + max_input_count = input_count_range[1] + elif isinstance(input_count_range, int) or input_count_range is None: + min_input_count = input_count_range + max_input_count = input_count_range + else: + raise RuntimeError('input_count_range must be a list or an integer') + + if isinstance(output_count_range, list): + min_output_count = output_count_range[0] + max_output_count = output_count_range[1] + elif isinstance(output_count_range, int) or output_count_range is None: + min_output_count = output_count_range + max_output_count = output_count_range + else: + raise RuntimeError('output_count_range must be a list or an integer') + + if min_input_count is not None and len(operator.inputs) < min_input_count: + raise RuntimeError( + 'For operator %s (type: %s), at least %s input(s) is(are) required but we got %s input(s) which are %s' + % (operator.full_name, operator.type, min_input_count, len(operator.inputs), operator.input_full_names)) + + if max_input_count is not None and len(operator.inputs) > max_input_count: + raise RuntimeError( + 'For operator %s (type: %s), at most %s input(s) is(are) supported but we got %s input(s) which are %s' + % (operator.full_name, operator.type, max_input_count, len(operator.inputs), operator.input_full_names)) + + if min_output_count is not None and len(operator.outputs) < min_output_count: + raise RuntimeError( + 'For operator %s (type: %s), at least %s output(s) is(are) produced but we got %s output(s) which are %s' + % (operator.full_name, operator.type, min_output_count, len(operator.outputs), operator.output_full_names)) + + if max_output_count is not None and len(operator.outputs) > max_output_count: + raise RuntimeError( + 'For operator %s (type: %s), at most %s outputs(s) is(are) supported but we got %s output(s) which are %s' + % (operator.full_name, operator.type, max_output_count, len(operator.outputs), operator.output_full_names))
+ + +
[docs]def check_input_and_output_types(operator, good_input_types=None, good_output_types=None): + ''' + Check if the type(s) of input(s)/output(s) is(are) correct + + :param operator: A Operator object + :param good_input_types: A list of allowed input types (e.g., [FloatTensorType, Int64TensorType]) or None. None + means that we skip the check of the input types. + :param good_output_types: A list of allowed output types. See good_input_types for its format. + ''' + if good_input_types is not None: + for variable in operator.inputs: + if type(variable.type) not in good_input_types: + raise RuntimeError('Operator %s (type: %s) got an input %s with a wrong type %s. Only %s are allowed' + % (operator.full_name, operator.type, variable.full_name, type(variable.type), + good_input_types)) + + if good_output_types is not None: + for variable in operator.outputs: + if type(variable.type) not in good_output_types: + raise RuntimeError('Operator %s (type: %s) got an output %s with a wrong type %s. Only %s are allowed' + % (operator.full_name, operator.type, variable.full_name, type(variable.type), + good_output_types))
+
+
+
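The two helpers above (check_input_and_output_numbers and check_input_and_output_types)
are the guards converter authors usually call first in a shape calculator. A minimal
sketch of that pattern (the operator and calculator names below are hypothetical, not
taken from this page):

from onnxconverter_common.data_types import FloatTensorType, Int64TensorType
from onnxconverter_common.utils import (
    check_input_and_output_numbers, check_input_and_output_types)

def my_shape_calculator(operator):
    # Fail fast if the operator was wired with the wrong number of tensors:
    # exactly one input, one or two outputs.
    check_input_and_output_numbers(
        operator, input_count_range=1, output_count_range=[1, 2])
    # Accept only float or int64 tensors on the input side.
    check_input_and_output_types(
        operator, good_input_types=[FloatTensorType, Int64TensorType])
    n = operator.inputs[0].type.shape[0]  # batch dimension, often None (dynamic)
    operator.outputs[0].type = FloatTensorType([n, 1])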
+
+
+ + + + \ No newline at end of file diff --git a/_modules/skl2onnx.html b/_modules/skl2onnx.html index 46ee022b4..813ec124e 100644 --- a/_modules/skl2onnx.html +++ b/_modules/skl2onnx.html @@ -1,275 +1,363 @@ - - - - - - - - skl2onnx — sklearn-onnx 1.11.2 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -
- - -
- - - - - -
- -
- - - - -
- -
- -

Source code for skl2onnx

-# SPDX-License-Identifier: Apache-2.0
-
-"""
-Main entry point to the converter from the *scikit-learn* to *onnx*.
-"""
-__version__ = "1.11.2"
-__author__ = "Microsoft"
-__producer__ = "skl2onnx"
-__producer_version__ = __version__
-__domain__ = "ai.onnx"
-__model_version__ = 0
-__max_supported_opset__ = 15  # Converters are tested up to this version.
-
-
-from .convert import convert_sklearn, to_onnx, wrap_as_onnx_mixin  # noqa
-from ._supported_operators import (  # noqa
-    update_registered_converter, get_model_alias
-)
-from ._parse import update_registered_parser  # noqa
-from .proto import get_latest_tested_opset_version  # noqa
-
-
-
[docs]def supported_converters(from_sklearn=False): - """ - Returns the list of supported converters. - To find the converter associated to a specific model, - the library gets the name of the model class, - adds ``'Sklearn'`` as a prefix and retrieves - the associated converter if available. - - :param from_sklearn: every supported model is mapped to converter - by a name prefixed with ``'Sklearn'``, the prefix is removed - if this parameter is False but the function only returns converters - whose name is prefixed by ``'Sklearn'`` - :return: list of supported models as string - """ - from .common._registration import _converter_pool # noqa - # The two following lines populates the list of supported converters. - from . import shape_calculators # noqa - from . import operator_converters # noqa - - names = sorted(_converter_pool.keys()) - if from_sklearn: - return [_[7:] for _ in names if _.startswith('Sklearn')] - return list(names)
-
- -
- - - -
- -
-
-
- -
- -
-
- - - - - - -
-
- + + + + + + + + skl2onnx - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for skl2onnx

+# SPDX-License-Identifier: Apache-2.0
+
+"""
+Main entry point to the converter from *scikit-learn* to *onnx*.
+"""
+__version__ = "1.14.0"
+__author__ = "Microsoft"
+__producer__ = "skl2onnx"
+__producer_version__ = __version__
+__domain__ = "ai.onnx"
+__model_version__ = 0
+__max_supported_opset__ = 18  # Converters are tested up to this version.
+
+
+from .convert import convert_sklearn, to_onnx, wrap_as_onnx_mixin  # noqa
+from ._supported_operators import (  # noqa
+    update_registered_converter, get_model_alias
+)
+from ._parse import update_registered_parser  # noqa
+from .proto import get_latest_tested_opset_version  # noqa
+
+
+
[docs]def supported_converters(from_sklearn=False): + """ + Returns the list of supported converters. + To find the converter associated to a specific model, + the library gets the name of the model class, + adds ``'Sklearn'`` as a prefix and retrieves + the associated converter if available. + + :param from_sklearn: every supported model is mapped to converter + by a name prefixed with ``'Sklearn'``, the prefix is removed + if this parameter is False but the function only returns converters + whose name is prefixed by ``'Sklearn'`` + :return: list of supported models as string + """ + from .common._registration import _converter_pool # noqa + # The two following lines populates the list of supported converters. + from . import shape_calculators # noqa + from . import operator_converters # noqa + + names = sorted(_converter_pool.keys()) + if from_sklearn: + return [_[7:] for _ in names if _.startswith('Sklearn')] + return list(names)
+
\ No newline at end of file
diff --git a/_modules/skl2onnx/_parse.html b/_modules/skl2onnx/_parse.html
index 9b4b1b0e3..1da9f7eae 100644
--- a/_modules/skl2onnx/_parse.html
+++ b/_modules/skl2onnx/_parse.html
@@ -1,1064 +1,1153 @@
- skl2onnx._parse - sklearn-onnx 1.11.2 documentation

Source code for skl2onnx._parse

-# SPDX-License-Identifier: Apache-2.0
-
-import warnings
-import numpy as np
-
-from sklearn import pipeline
-from sklearn.base import (
-    ClassifierMixin, ClusterMixin, is_classifier)
-try:
-    from sklearn.base import OutlierMixin
-except ImportError:
-    # scikit-learn <= 0.19
-    class OutlierMixin:
-        pass
-
-from sklearn.ensemble import (
-    IsolationForest, RandomTreesEmbedding, RandomForestClassifier)
-from sklearn.gaussian_process import GaussianProcessRegressor
-from sklearn.linear_model import BayesianRidge
-from sklearn.model_selection import GridSearchCV
-from sklearn.neighbors import NearestNeighbors, LocalOutlierFactor
-from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
-from sklearn.multioutput import MultiOutputClassifier
-from sklearn.preprocessing import OneHotEncoder
-from sklearn.pipeline import Pipeline
-from sklearn.svm import LinearSVC, NuSVC, SVC
-try:
-    from sklearn.compose import ColumnTransformer
-except ImportError:
-    # ColumnTransformer was introduced in 0.20.
-    ColumnTransformer = None
-try:
-    from sklearn.preprocessing import Imputer
-except ImportError:
-    Imputer = None
-try:
-    from sklearn.impute import SimpleImputer
-except ImportError:
-    # changed in 0.20
-    SimpleImputer = None
-
-from ._supported_operators import (
-    _get_sklearn_operator_name, cluster_list, outlier_list)
-from ._supported_operators import (
-    sklearn_classifier_list, sklearn_operator_name_map)
-from .common._container import SklearnModelContainerNode
-from .common._registration import _converter_pool, _shape_calculator_pool
-from .common._topology import Topology, Variable
-from .common.data_types import (
-    DictionaryType, Int64TensorType, SequenceType,
-    StringTensorType, TensorType, FloatTensorType,
-    guess_tensor_type)
-from .common.utils import get_column_indices
-from .common.utils_checking import check_signature
-from .common.utils_classifier import get_label_classes
-from .common.utils_sklearn import _process_options
-
-
-do_not_merge_columns = tuple(
-    filter(lambda op: op is not None,
-           [OneHotEncoder, ColumnTransformer]))
-
-
-def _fetch_input_slice(scope, inputs, column_indices):
-    if not isinstance(inputs, list):
-        raise TypeError("Parameter inputs must be a list.")
-    if len(inputs) == 0:
-        raise RuntimeError("Operator ArrayFeatureExtractor requires at "
-                           "least one inputs.")
-    if len(inputs) != 1:
-        raise RuntimeError("Operator ArrayFeatureExtractor does not support "
-                           "multiple input tensors.")
-    if (isinstance(inputs[0].type, TensorType) and
-            len(inputs[0].type.shape) == 2 and
-            inputs[0].type.shape[1] == len(column_indices)):
-        # No need to extract.
-        return inputs
-    array_feature_extractor_operator = scope.declare_local_operator(
-        'SklearnArrayFeatureExtractor')
-    array_feature_extractor_operator.inputs = inputs
-    array_feature_extractor_operator.column_indices = column_indices
-    output_variable_name = scope.declare_local_variable(
-        'extracted_feature_columns', inputs[0].type)
-    array_feature_extractor_operator.outputs.append(output_variable_name)
-    return array_feature_extractor_operator.outputs
-
-
-def _parse_sklearn_simple_model(scope, model, inputs, custom_parsers=None,
-                                alias=None):
-    """
-    This function handles all non-pipeline models.
-
-    :param scope: Scope object
-    :param model: A scikit-learn object (e.g., *OneHotEncoder*
-        or *LogisticRegression*)
-    :param inputs: A list of variables
-    :param custom_parsers: dictionary of custom parsers
-    :param alias: use this alias instead of the one based on the class name
-    :return: A list of output variables which will be passed to next
-        stage
-    """
-    # alias can be None
-    if isinstance(model, str):
-        raise RuntimeError("Parameter model must be an object not a "
-                           "string '{0}'.".format(model))
-    if any(not isinstance(i, Variable) for i in inputs):
-        raise TypeError(
-            "One input is not a Variable for model %r - %r."
-            "" % (model, inputs))
-    if alias is None:
-        alias = _get_sklearn_operator_name(type(model))
-    this_operator = scope.declare_local_operator(alias, model)
-    this_operator.inputs = inputs
-
-    if hasattr(model, 'onnx_parser'):
-        parser_names = model.onnx_parser()
-        if parser_names is not None:
-            try:
-                names = parser_names(scope=scope, inputs=inputs)
-            except TypeError as e:
-                warnings.warn(
-                    "Calling parser %r for model type %r failed due to %r. "
-                    "This warnings will become an exception in version 1.11. "
-                    "The parser signature should parser(scope=None, "
-                    "inputs=None)." % (
-                        parser_names, e, type(model)),
-                    DeprecationWarning)
-                names = parser_names()
-            if names is not None:
-                for name in names:
-                    if isinstance(name, Variable):
-                        this_operator.outputs.append(name)
-                    elif isinstance(name, str):
-                        var = scope.declare_local_variable(
-                            name, guess_tensor_type(inputs[0].type))
-                        this_operator.outputs.append(var)
-                    elif isinstance(name, tuple) and len(name) == 2:
-                        var = scope.declare_local_variable(
-                            name[0], guess_tensor_type(name[1]))
-                        this_operator.outputs.append(var)
-                    else:
-                        raise RuntimeError(
-                            "Unexpected output type %r (value=%r) for "
-                            "operator %r." % (
-                                type(name), name, type(model)))
-                return this_operator.outputs
-
-    if (type(model) in sklearn_classifier_list
-            or isinstance(model, ClassifierMixin)
-            or (isinstance(model, GridSearchCV)
-                and is_classifier(model))):
-        # For classifiers, we may have two outputs, one for label and
-        # the other one for probabilities of all classes. Notice that
-        # their types here are not necessarily correct and they will
-        # be fixed in shape inference phase.
-        label_variable = scope.declare_local_variable(
-            'label', Int64TensorType())
-        if type(model) in [RandomForestClassifier]:
-            prob_dtype = FloatTensorType()
-        else:
-            prob_dtype = guess_tensor_type(inputs[0].type)
-        probability_tensor_variable = scope.declare_local_variable(
-            'probabilities', prob_dtype)
-        this_operator.outputs.append(label_variable)
-        this_operator.outputs.append(probability_tensor_variable)
-
-    elif type(model) in cluster_list or isinstance(model, ClusterMixin):
-        # For clustering, we may have two outputs, one for label and
-        # the other one for scores of all classes. Notice that their
-        # types here are not necessarily correct and they will be fixed
-        # in shape inference phase
-        label_variable = scope.declare_local_variable(
-            'label', Int64TensorType())
-        score_tensor_variable = scope.declare_local_variable(
-            'scores', guess_tensor_type(inputs[0].type))
-        this_operator.outputs.append(label_variable)
-        this_operator.outputs.append(score_tensor_variable)
-
-    elif type(model) in {IsolationForest, LocalOutlierFactor}:
-        label_variable = scope.declare_local_variable(
-            'label', Int64TensorType())
-        score_tensor_variable = scope.declare_local_variable(
-            'scores', guess_tensor_type(inputs[0].type))
-        this_operator.outputs.append(label_variable)
-        this_operator.outputs.append(score_tensor_variable)
-        options = scope.get_options(model, dict(score_samples=False))
-        if options['score_samples']:
-            scores_var = scope.declare_local_variable(
-                'score_samples', guess_tensor_type(inputs[0].type))
-            this_operator.outputs.append(scores_var)
-
-    elif type(model) in outlier_list or isinstance(model, OutlierMixin):
-        # For outliers, we may have two outputs, one for label and
-        # the other one for scores.
-        label_variable = scope.declare_local_variable(
-            'label', Int64TensorType())
-        score_tensor_variable = scope.declare_local_variable(
-            'scores', guess_tensor_type(inputs[0].type))
-        this_operator.outputs.append(label_variable)
-        this_operator.outputs.append(score_tensor_variable)
-
-    elif type(model) == NearestNeighbors:
-        # For Nearest Neighbours, we have two outputs, one for nearest
-        # neighbours' indices and the other one for distances
-        index_variable = scope.declare_local_variable(
-            'index', Int64TensorType())
-        distance_variable = scope.declare_local_variable(
-            'distance', guess_tensor_type(inputs[0].type))
-        this_operator.outputs.append(index_variable)
-        this_operator.outputs.append(distance_variable)
-
-    elif type(model) in {GaussianMixture, BayesianGaussianMixture}:
-        label_variable = scope.declare_local_variable(
-            'label', Int64TensorType())
-        prob_variable = scope.declare_local_variable(
-            'probabilities', guess_tensor_type(inputs[0].type))
-        this_operator.outputs.append(label_variable)
-        this_operator.outputs.append(prob_variable)
-        options = scope.get_options(model, dict(score_samples=False))
-        if options['score_samples']:
-            scores_var = scope.declare_local_variable(
-                'score_samples', guess_tensor_type(inputs[0].type))
-            this_operator.outputs.append(scores_var)
-    elif type(model) in {SimpleImputer, Imputer}:
-        if isinstance(inputs[0].type, (Int64TensorType, StringTensorType)):
-            otype = inputs[0].type.__class__()
-        else:
-            otype = guess_tensor_type(inputs[0].type)
-        variable = scope.declare_local_variable('variable', otype)
-        this_operator.outputs.append(variable)
-    else:
-        if hasattr(model, 'get_feature_names_out'):
-            try:
-                out_names = model.get_feature_names_out()
-            except (AttributeError, ValueError):
-                # Catch a bug in scikit-learn.
-                out_names = None
-            this_operator.feature_names_out_ = out_names
-        input_type = guess_tensor_type(inputs[0].type)
-        variable = scope.declare_local_variable(
-            'variable', input_type)
-        this_operator.outputs.append(variable)
-
-    options = scope.get_options(model, dict(decision_path=False), fail=False)
-    if options is not None and options['decision_path']:
-        dec_path = scope.declare_local_variable(
-            'decision_path', StringTensorType())
-        this_operator.outputs.append(dec_path)
-
-    options = scope.get_options(model, dict(decision_leaf=False), fail=False)
-    if options is not None and options['decision_leaf']:
-        dec_path = scope.declare_local_variable(
-            'decision_leaf', Int64TensorType())
-        this_operator.outputs.append(dec_path)
-
-    return this_operator.outputs
-
-
-def _parse_sklearn_pipeline(scope, model, inputs, custom_parsers=None):
-    """
-    The basic ideas of scikit-learn parsing:
-        1. Sequentially go though all stages defined in the considered
-           scikit-learn pipeline
-        2. The output variables of one stage will be fed into its next
-           stage as the inputs.
-
-    :param scope: Scope object defined in _topology.py
-    :param model: scikit-learn pipeline object
-    :param inputs: A list of Variable objects
-    :return: A list of output variables produced by the input pipeline
-    """
-    for step in model.steps:
-        inputs = _parse_sklearn(scope, step[1], inputs,
-                                custom_parsers=custom_parsers)
-    return inputs
-
-
-def _parse_sklearn_feature_union(scope, model, inputs, custom_parsers=None):
-    """
-    :param scope: Scope object
-    :param model: A scikit-learn FeatureUnion object
-    :param inputs: A list of Variable objects
-    :return: A list of output variables produced by feature union
-    """
-    # Output variable name of each transform. It's a list of string.
-    transformed_result_names = []
-    # Encode each transform as our IR object
-    for name, transform in model.transformer_list:
-        transformed_result_names.append(
-            _parse_sklearn_simple_model(
-                scope, transform, inputs,
-                custom_parsers=custom_parsers)[0])
-        if (model.transformer_weights is not None and name in
-                model.transformer_weights):
-            transform_result = [transformed_result_names.pop()]
-            # Create a Multiply ONNX node
-            multiply_operator = scope.declare_local_operator('SklearnMultiply')
-            multiply_operator.inputs = transform_result
-            multiply_operator.operand = model.transformer_weights[name]
-            multiply_output = scope.declare_local_variable(
-                'multiply_output', guess_tensor_type(inputs[0].type))
-            multiply_operator.outputs.append(multiply_output)
-            transformed_result_names.append(multiply_operator.outputs[0])
-
-    # Create a Concat ONNX node
-    concat_operator = scope.declare_local_operator('SklearnConcat')
-    concat_operator.inputs = transformed_result_names
-
-    # Declare output name of scikit-learn FeatureUnion
-    union_name = scope.declare_local_variable(
-        'union', guess_tensor_type(inputs[0].type))
-    concat_operator.outputs.append(union_name)
-
-    return concat_operator.outputs
-
-
-def _parse_sklearn_column_transformer(scope, model, inputs,
-                                      custom_parsers=None):
-    """
-    :param scope: Scope object
-    :param model: A *scikit-learn* *ColumnTransformer* object
-    :param inputs: A list of Variable objects
-    :return: A list of output variables produced by column transformer
-    """
-    # Output variable name of each transform. It's a list of string.
-    transformed_result_names = []
-    # Encode each transform as our IR object
-    for name, op, column_indices in model.transformers_:
-        if op == 'drop':
-            continue
-        if isinstance(column_indices, slice):
-            column_indices = list(range(
-                column_indices.start
-                if column_indices.start is not None else 0,
-                column_indices.stop, column_indices.step
-                if column_indices.step is not None else 1))
-        elif isinstance(column_indices, (int, str)):
-            column_indices = [column_indices]
-        names = get_column_indices(column_indices, inputs, multiple=True)
-        transform_inputs = []
-        for onnx_var, onnx_is in names.items():
-            tr_inputs = _fetch_input_slice(scope, [inputs[onnx_var]], onnx_is)
-            transform_inputs.extend(tr_inputs)
-
-        merged_cols = False
-        if len(transform_inputs) > 1:
-            if isinstance(op, Pipeline):
-                if not isinstance(op.steps[0][1], do_not_merge_columns):
-                    merged_cols = True
-            elif not isinstance(op, do_not_merge_columns):
-                merged_cols = True
-
-        if merged_cols:
-            # Many ONNX operators expect one input vector,
-            # the default behaviour is to merge columns.
-            ty = transform_inputs[0].type.__class__([None, None])
-
-            conc_op = scope.declare_local_operator('SklearnConcat')
-            conc_op.inputs = transform_inputs
-            conc_names = scope.declare_local_variable('merged_columns', ty)
-            conc_op.outputs.append(conc_names)
-            transform_inputs = [conc_names]
-
-        model_obj = model.named_transformers_[name]
-        if isinstance(model_obj, str):
-            if model_obj == "passthrough":
-                var_out = transform_inputs[0]
-            elif model_obj == "drop":
-                var_out = None
-            else:
-                raise RuntimeError("Unknown operator alias "
-                                   "'{0}'. These are specified in "
-                                   "_supported_operators.py."
-                                   "".format(model_obj))
-        else:
-            var_out = _parse_sklearn(
-                scope, model_obj,
-                transform_inputs, custom_parsers=custom_parsers)[0]
-            if (model.transformer_weights is not None and name in
-                    model.transformer_weights):
-                # Create a Multiply ONNX node
-                multiply_operator = scope.declare_local_operator(
-                    'SklearnMultiply')
-                multiply_operator.inputs.append(var_out)
-                multiply_operator.operand = model.transformer_weights[name]
-                var_out = scope.declare_local_variable(
-                    'multiply_output', guess_tensor_type(inputs[0].type))
-                multiply_operator.outputs.append(var_out)
-        if var_out:
-            transformed_result_names.append(var_out)
-
-    # Create a Concat ONNX node
-    if len(transformed_result_names) > 1:
-        ty = transformed_result_names[0].type.__class__([None, None])
-        concat_operator = scope.declare_local_operator('SklearnConcat')
-        concat_operator.inputs = transformed_result_names
-
-        # Declare output name of scikit-learn ColumnTransformer
-        transformed_column_name = scope.declare_local_variable(
-            'transformed_column', ty)
-        concat_operator.outputs.append(transformed_column_name)
-        return concat_operator.outputs
-    return transformed_result_names
-
-
-def _parse_sklearn_grid_search_cv(scope, model, inputs, custom_parsers=None):
-    options = scope.get_options(model)
-    if options:
-        scope.add_options(id(model.best_estimator_), options)
-    res = parse_sklearn(scope, model.best_estimator_, inputs,
-                        custom_parsers=custom_parsers)
-    scope.replace_raw_operator(
-        model.best_estimator_, model, "SklearnGridSearchCV")
-    return res
-
-
-def _parse_sklearn_random_trees_embedding(scope, model, inputs,
-                                          custom_parsers=None):
-    res = parse_sklearn(scope, model.base_estimator_, inputs,
-                        custom_parsers=custom_parsers)
-    if len(res) != 1:
-        raise RuntimeError(
-            "A regressor only produces one output not %r." % res)
-    scope.replace_raw_operator(
-        model.base_estimator_, model, "SklearnRandomTreesEmbedding")
-    return res
-
-
-def _apply_zipmap(zipmap_options, scope, model, input_type,
-                  probability_tensor):
-    if zipmap_options == 'columns':
-        zipmap_operator = scope.declare_local_operator('SklearnZipMapColumns')
-        classes = get_label_classes(scope, model)
-        classes_names = get_label_classes(scope, model, node_names=True)
-    else:
-        zipmap_operator = scope.declare_local_operator('SklearnZipMap')
-        classes = get_label_classes(scope, model)
-
-    zipmap_operator.inputs = probability_tensor
-    label_type = Int64TensorType([None])
-
-    if (isinstance(model.classes_, list) and
-            isinstance(model.classes_[0], np.ndarray)):
-        # multi-label problem
-        pass
-    elif np.issubdtype(classes.dtype, np.floating):
-        classes = np.array(list(map(lambda x: int(x), classes)))
-        if set(map(lambda x: float(x), classes)) != set(model.classes_):
-            raise RuntimeError("skl2onnx implicitly converts float class "
-                               "labels into integers but at least one label "
-                               "is not an integer. Class labels should "
-                               "be integers or strings.")
-        zipmap_operator.classlabels_int64s = classes
-    elif np.issubdtype(classes.dtype, np.signedinteger):
-        zipmap_operator.classlabels_int64s = classes
-    elif np.issubdtype(classes.dtype, np.unsignedinteger):
-        zipmap_operator.classlabels_int64s = classes
-    else:
-        classes = np.array([s.encode('utf-8') for s in classes])
-        zipmap_operator.classlabels_strings = classes
-        label_type = StringTensorType([None])
-
-    zip_label = scope.declare_local_variable('output_label', label_type)
-    if len(probability_tensor) == 2:
-        zipmap_operator.outputs.append(zip_label)
-
-    if zipmap_options == 'columns':
-        prob_type = probability_tensor[-1].type
-        for cl in classes_names:
-            output_cl = scope.declare_local_variable(cl, prob_type.__class__())
-            zipmap_operator.outputs.append(output_cl)
-    else:
-        zip_probability = scope.declare_local_variable(
-            'output_probability',
-            SequenceType(
-                DictionaryType(
-                    label_type, guess_tensor_type(input_type))))
-        zipmap_operator.outputs.append(zip_probability)
-
-    zipmap_operator.init_status(is_evaluated=True)
-    return zipmap_operator.outputs
-
-
-def _parse_sklearn_classifier(scope, model, inputs, custom_parsers=None):
-    options = scope.get_options(model, dict(zipmap=True))
-    no_zipmap = (
-        (isinstance(options['zipmap'], bool) and not options['zipmap']) or
-        (model.__class__ in [NuSVC, SVC] and not model.probability))
-    probability_tensor = _parse_sklearn_simple_model(
-        scope, model, inputs, custom_parsers=custom_parsers)
-
-    if no_zipmap:
-        if options.get('output_class_labels', False):
-            if not hasattr(model, "classes_"):
-                raise RuntimeError(
-                    "Model type %r has no attribute 'classes_'. "
-                    "Option 'output_class_labels' is invalid or a new parser "
-                    "must be used." % model.__class__.__name__)
-
-            clout = scope.declare_local_operator('SklearnClassLabels')
-            clout.classes = get_label_classes(scope, model)
-            if model.classes_.dtype in (np.int32, np.int64):
-                ctype = Int64TensorType
-            else:
-                ctype = StringTensorType
-            label_type = ctype(clout.classes.shape)
-            class_labels = scope.declare_local_variable(
-                'class_labels', label_type)
-            clout.outputs.append(class_labels)
-            outputs = list(probability_tensor)
-            outputs.append(class_labels)
-            return outputs
-        return probability_tensor
-
-    if options.get('output_class_labels', False):
-        raise RuntimeError(
-            "Option 'output_class_labels' is not compatible with option "
-            "'zipmap'.")
-
-    return _apply_zipmap(
-        options['zipmap'], scope, model, inputs[0].type, probability_tensor)
-
-
-def _parse_sklearn_multi_output_classifier(scope, model, inputs,
-                                           custom_parsers=None):
-    options = scope.get_options(model, dict(zipmap=True))
-    if options['zipmap']:
-        warnings.warn(
-            "Option zipmap is ignored for model %r. "
-            "Set option zipmap to False to "
-            "remove this message." % type(model),
-            UserWarning)
-    alias = _get_sklearn_operator_name(type(model))
-    this_operator = scope.declare_local_operator(alias, model)
-    this_operator.inputs = inputs
-
-    if hasattr(model, 'classes_'):
-        classes = model.classes_
-    else:
-        classes = [get_label_classes(scope, m) for m in model.estimators_]
-    if len(set(cl.dtype for cl in classes)) != 1:
-        raise RuntimeError(
-            "Class labels may have only one type %r."
-            "" % set(cl.dtype for cl in classes))
-    if classes[0].dtype in (np.int32, np.int64):
-        ctype = Int64TensorType
-    else:
-        ctype = StringTensorType
-
-    label = scope.declare_local_variable("label", ctype())
-    proba = scope.declare_local_variable(
-        "probabilities", SequenceType(guess_tensor_type(inputs[0].type)))
-    this_operator.outputs.append(label)
-    this_operator.outputs.append(proba)
-
-    options = scope.get_options(model)
-    if options.get('output_class_labels', False):
-        clout = scope.declare_local_operator('SklearnClassLabels')
-        clout.is_multi_output = True
-        clout.classes = classes
-        class_labels = scope.declare_local_variable(
-            "class_labels",
-            SequenceType(ctype()))
-        clout.outputs.append(class_labels)
-        return list(this_operator.outputs) + [class_labels]
-
-    return this_operator.outputs
-
-
-def _parse_sklearn_gaussian_process(scope, model, inputs, custom_parsers=None):
-    options = scope.get_options(
-        model, dict(return_cov=False, return_std=False))
-    if options['return_std'] and options['return_cov']:
-        raise RuntimeError(
-            "Not returning standard deviation of predictions when "
-            "returning full covariance.")
-
-    alias = _get_sklearn_operator_name(type(model))
-    this_operator = scope.declare_local_operator(alias, model)
-    mean_tensor = scope.declare_local_variable(
-        "GPmean", guess_tensor_type(inputs[0].type))
-    this_operator.inputs = inputs
-    this_operator.outputs.append(mean_tensor)
-
-    if options['return_std'] or options['return_cov']:
-        # covariance or standard deviation
-        covstd_tensor = scope.declare_local_variable(
-            'GPcovstd', guess_tensor_type(inputs[0].type))
-        this_operator.outputs.append(covstd_tensor)
-    return this_operator.outputs
-
-
-def _parse_sklearn_bayesian_ridge(scope, model, inputs, custom_parsers=None):
-    options = scope.get_options(model, dict(return_std=False))
-    alias = _get_sklearn_operator_name(type(model))
-    this_operator = scope.declare_local_operator(alias, model)
-    mean_tensor = scope.declare_local_variable(
-        "variable", guess_tensor_type(inputs[0].type))
-    this_operator.inputs = inputs
-    this_operator.outputs.append(mean_tensor)
-
-    if options['return_std']:
-        # covariance or standard deviation
-        covstd_tensor = scope.declare_local_variable(
-            'std', guess_tensor_type(inputs[0].type))
-        this_operator.outputs.append(covstd_tensor)
-    return this_operator.outputs
-
-
-def _parse_sklearn(scope, model, inputs, custom_parsers=None, alias=None):
-    """
-    This is a delegate function. It does nothing but invokes the
-    correct parsing function according to the input model's type.
-
-    :param scope: Scope object
-    :param model: A scikit-learn object (e.g., OneHotEncoder
-        and LogisticRegression)
-    :param inputs: A list of variables
-    :param custom_parsers: parsers determines which outputs is expected
-        for which particular task, default parsers are defined for
-        classifiers, regressors, pipeline but they can be rewritten,
-        *custom_parsers* is a dictionary ``{ type: fct_parser(scope,
-        model, inputs, custom_parsers=None) }``
-    :param alias: alias of the model (None if based on the model class)
-    :return: The output variables produced by the input model
-    """
-    for i, inp in enumerate(inputs):
-        if not isinstance(inp, Variable):
-            raise TypeError(
-                "Unexpected input type %r for input %r: %r." % (
-                    type(inp), i, inp))
-
-    if alias is not None:
-        outputs = _parse_sklearn_simple_model(scope, model, inputs,
-                                              custom_parsers=custom_parsers,
-                                              alias=alias)
-        return outputs
-
-    tmodel = type(model)
-    if custom_parsers is not None and tmodel in custom_parsers:
-        outputs = custom_parsers[tmodel](scope, model, inputs,
-                                         custom_parsers=custom_parsers)
-    elif tmodel in sklearn_parsers_map:
-        outputs = sklearn_parsers_map[tmodel](scope, model, inputs,
-                                              custom_parsers=custom_parsers)
-    elif isinstance(model, pipeline.Pipeline):
-        parser = sklearn_parsers_map[pipeline.Pipeline]
-        outputs = parser(scope, model, inputs, custom_parsers=custom_parsers)
-    else:
-        outputs = _parse_sklearn_simple_model(scope, model, inputs,
-                                              custom_parsers=custom_parsers)
-    return outputs
-
-
-
-[docs]def parse_sklearn(scope, model, inputs, custom_parsers=None, final_types=None):
-    """
-    This is a delegate function. It does nothing but invokes the
-    correct parsing function according to the input model's type.
-
-    :param scope: Scope object
-    :param model: A scikit-learn object (e.g., OneHotEncoder
-        and LogisticRegression)
-    :param inputs: A list of variables
-    :param custom_parsers: parsers determines which outputs is expected
-        for which particular task, default parsers are defined for
-        classifiers, regressors, pipeline but they can be rewritten,
-        *custom_parsers* is a dictionary ``{ type: fct_parser(scope,
-        model, inputs, custom_parsers=None) }``
-    :param final_types: a python list. Works the same way as initial_types
-        but not mandatory, it is used to overwrites the type
-        (if type is not None) and the name of every output.
-    :return: The output variables produced by the input model
-    """
-    if final_types is not None:
-        outputs = []
-        for name, ty in final_types:
-            var = scope.declare_local_output(name, ty, missing_type=True)
-            if var.onnx_name != name:
-                raise RuntimeError(
-                    "Unable to add duplicated output '{}', '{}'. "
-                    "Output and input must have different names."
-                    "".format(var.onnx_name, name))
-            outputs.append(var)
-
-        hidden_outputs = _parse_sklearn(
-            scope, model, inputs, custom_parsers=custom_parsers)
-
-        if len(hidden_outputs) != len(outputs):
-            raise RuntimeError(
-                "Number of declared outputs is unexpected, declared '{}' "
-                "found '{}'.".format(
-                    ", ".join(_.onnx_name for _ in outputs),
-                    ", ".join(_.onnx_name for _ in hidden_outputs)))
-        for h, o in zip(hidden_outputs, outputs):
-            if o.type is None:
-                iop = scope.declare_local_operator('SklearnIdentity')
-            else:
-                iop = scope.declare_local_operator('SklearnCast')
-            iop.inputs = [h]
-            iop.outputs = [o]
-            h.init_status(is_leaf=False)
-            o.init_status(is_leaf=True)
-            if o.type is None and h.type is not None:
-                o.type = h.type
-        return outputs
-
-    res = _parse_sklearn(
-        scope, model, inputs, custom_parsers=custom_parsers)
-    for r in res:
-        r.init_status(is_leaf=True)
-    return res
-
-
-[docs]def parse_sklearn_model(model, initial_types=None, target_opset=None,
-                        custom_conversion_functions=None,
-                        custom_shape_calculators=None,
-                        custom_parsers=None,
-                        options=None, white_op=None,
-                        black_op=None, final_types=None,
-                        naming=None):
-    """
-    Puts *scikit-learn* object into an abstract container so that
-    our framework can work seamlessly on models created
-    with different machine learning tools.
-
-    :param model: A scikit-learn model
-    :param initial_types: a python list. Each element is a tuple of a
-        variable name and a type defined in data_types.py
-    :param target_opset: number, for example, 7 for ONNX 1.2,
-        and 8 for ONNX 1.3.
-    :param custom_conversion_functions: a dictionary for specifying
-        the user customized conversion function if not registered
-    :param custom_shape_calculators: a dictionary for specifying the
-        user customized shape calculator if not registered
-    :param custom_parsers: parsers determines which outputs is expected
-        for which particular task, default parsers are defined for
-        classifiers, regressors, pipeline but they can be rewritten,
-        *custom_parsers* is a dictionary
-        ``{ type: fct_parser(scope, model, inputs, custom_parsers=None) }``
-    :param options: specific options given to converters
-        (see :ref:`l-conv-options`)
-    :param white_op: white list of ONNX nodes allowed
-        while converting a pipeline, if empty, all are allowed
-    :param black_op: black list of ONNX nodes allowed
-        while converting a pipeline, if empty, none are blacklisted
-    :param final_types: a python list. Works the same way as initial_types
-        but not mandatory, it is used to overwrites the type
-        (if type is not None) and the name of every output.
-    :param naming: the user may want to change the way intermediate
-        are named, this parameter can be a string (a prefix) or a
-        function, which signature is the following:
-        `get_name(name, existing_names)`, the library will then
-        check this name is unique and modify it if not
-    :return: :class:`Topology <skl2onnx.common._topology.Topology>`
-
-    .. versionchanged:: 1.10.0
-        Parameter *naming* was added.
-    """
-    options = _process_options(model, options)
-
-    raw_model_container = SklearnModelContainerNode(
-        model, white_op=white_op, black_op=black_op)
-
-    # Declare a computational graph. It will become a representation of
-    # the input scikit-learn model after parsing.
-    topology = Topology(
-        raw_model_container, initial_types=initial_types,
-        target_opset=target_opset,
-        custom_conversion_functions=custom_conversion_functions,
-        custom_shape_calculators=custom_shape_calculators,
-        registered_models=dict(
-            conv=_converter_pool, shape=_shape_calculator_pool,
-            aliases=sklearn_operator_name_map))
-
-    # Declare an object to provide variables' and operators' naming mechanism.
-    scope = topology.declare_scope('__root__', options=options, naming=naming)
-    inputs = scope.input_variables
-
-    # The object raw_model_container is a part of the topology
-    # we're going to return. We use it to store the inputs of
-    # the scikit-learn's computational graph.
-    for variable in inputs:
-        variable.init_status(is_root=True)
-        raw_model_container.add_input(variable)
-
-    # Parse the input scikit-learn model as a Topology object.
-    outputs = parse_sklearn(scope, model, inputs,
-                            custom_parsers=custom_parsers,
-                            final_types=final_types)
-
-    # The object raw_model_container is a part of the topology we're
-    # going to return. We use it to store the outputs of the
-    # scikit-learn's computational graph.
-    if final_types is not None and len(final_types) != len(outputs):
-        raise RuntimeError(
-            "Unexpected number of outputs, expected %d, got %d "
-            "after parsing." % (len(final_types), len(outputs)))
-    return topology
-
-
-def build_sklearn_parsers_map():
-    map_parser = {
-        pipeline.Pipeline: _parse_sklearn_pipeline,
-        pipeline.FeatureUnion: _parse_sklearn_feature_union,
-        BayesianRidge: _parse_sklearn_bayesian_ridge,
-        GaussianProcessRegressor: _parse_sklearn_gaussian_process,
-        GridSearchCV: _parse_sklearn_grid_search_cv,
-        MultiOutputClassifier: _parse_sklearn_multi_output_classifier,
-        RandomTreesEmbedding: _parse_sklearn_random_trees_embedding,
-    }
-    if ColumnTransformer is not None:
-        map_parser[ColumnTransformer] = _parse_sklearn_column_transformer
-
-    for tmodel in sklearn_classifier_list:
-        if tmodel not in [LinearSVC]:
-            map_parser[tmodel] = _parse_sklearn_classifier
-    return map_parser
-
-
-[docs]def update_registered_parser(model, parser_fct):
-    """
-    Registers or updates a parser for a new model.
-    A parser returns the expected output of a model.
-
-    :param model: model class
-    :param parser_fct: parser, signature is the same as
-        :func:`parse_sklearn <skl2onnx._parse.parse_sklearn>`
-    """
-    check_signature(parser_fct, _parse_sklearn_classifier)
-    sklearn_parsers_map[model] = parser_fct
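This function is part of the public API in both versions shown in this diff. A sketch of registering a parser for a hypothetical estimator; the class and parser names are invented, and a converter plus shape calculator would still be needed for a full conversion:

    from sklearn.base import BaseEstimator, TransformerMixin
    from skl2onnx import update_registered_parser
    from skl2onnx._parse import _parse_sklearn_simple_model

    class MyCustomEstimator(BaseEstimator, TransformerMixin):  # hypothetical model
        def fit(self, X, y=None):
            return self
        def transform(self, X):
            return X

    def my_parser(scope, model, inputs, custom_parsers=None):
        # Delegate to the default single-model parser; the signature must
        # match parse_sklearn's parser signature to pass check_signature.
        return _parse_sklearn_simple_model(
            scope, model, inputs, custom_parsers=custom_parsers)

    update_registered_parser(MyCustomEstimator, my_parser)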
-
-
-# registered parsers
-sklearn_parsers_map = build_sklearn_parsers_map()
-
+ skl2onnx._parse - sklearn-onnx 1.14.0 documentation

Source code for skl2onnx._parse

+# SPDX-License-Identifier: Apache-2.0
+
+import warnings
+import numpy as np
+
+from sklearn import pipeline
+from sklearn.base import (
+    ClassifierMixin, ClusterMixin, is_classifier)
+try:
+    from sklearn.base import OutlierMixin
+except ImportError:
+    # scikit-learn <= 0.19
+    class OutlierMixin:
+        pass
+
+from sklearn.ensemble import (
+    IsolationForest, RandomTreesEmbedding, RandomForestClassifier)
+from sklearn.gaussian_process import GaussianProcessRegressor
+from sklearn.linear_model import BayesianRidge
+from sklearn.model_selection import GridSearchCV
+from sklearn.neighbors import NearestNeighbors, LocalOutlierFactor
+from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
+from sklearn.multioutput import MultiOutputClassifier
+from sklearn.preprocessing import OneHotEncoder
+from sklearn.pipeline import Pipeline
+from sklearn.svm import LinearSVC, NuSVC, SVC
+try:
+    from sklearn.compose import ColumnTransformer
+except ImportError:
+    # ColumnTransformer was introduced in 0.20.
+    ColumnTransformer = None
+try:
+    from sklearn.preprocessing import Imputer
+except ImportError:
+    Imputer = None
+try:
+    from sklearn.impute import SimpleImputer
+except ImportError:
+    # changed in 0.20
+    SimpleImputer = None
+
+from ._supported_operators import (
+    _get_sklearn_operator_name, cluster_list, outlier_list)
+from ._supported_operators import (
+    sklearn_classifier_list, sklearn_operator_name_map)
+from .common._container import SklearnModelContainerNode
+from .common._registration import _converter_pool, _shape_calculator_pool
+from .common._topology import Topology, Variable
+from .common.data_types import (
+    DictionaryType, Int64TensorType, SequenceType,
+    StringTensorType, TensorType, FloatTensorType,
+    guess_tensor_type)
+from .common.utils import get_column_indices
+from .common.utils_checking import check_signature
+from .common.utils_classifier import get_label_classes
+from .common.utils_sklearn import _process_options
+
+
+do_not_merge_columns = tuple(
+    filter(lambda op: op is not None,
+           [OneHotEncoder, ColumnTransformer]))
+
+
+def _fetch_input_slice(scope, inputs, column_indices):
+    if not isinstance(inputs, list):
+        raise TypeError("Parameter inputs must be a list.")
+    if len(inputs) == 0:
+        raise RuntimeError("Operator ArrayFeatureExtractor requires at "
+                           "least one inputs.")
+    if len(inputs) != 1:
+        raise RuntimeError("Operator ArrayFeatureExtractor does not support "
+                           "multiple input tensors.")
+    if (isinstance(inputs[0].type, TensorType) and
+            len(inputs[0].type.shape) == 2 and
+            inputs[0].type.shape[1] == len(column_indices)):
+        # No need to extract.
+        return inputs
+    array_feature_extractor_operator = scope.declare_local_operator(
+        'SklearnArrayFeatureExtractor')
+    array_feature_extractor_operator.inputs = inputs
+    array_feature_extractor_operator.column_indices = column_indices
+    output_variable_name = scope.declare_local_variable(
+        'extracted_feature_columns', inputs[0].type)
+    array_feature_extractor_operator.outputs.append(output_variable_name)
+    return array_feature_extractor_operator.outputs
+
+
+def _parse_sklearn_simple_model(scope, model, inputs, custom_parsers=None,
+                                alias=None):
+    """
+    This function handles all non-pipeline models.
+
+    :param scope: Scope object
+    :param model: A scikit-learn object (e.g., *OneHotEncoder*
+        or *LogisticRegression*)
+    :param inputs: A list of variables
+    :param custom_parsers: dictionary of custom parsers
+    :param alias: use this alias instead of the one based on the class name
+    :return: A list of output variables which will be passed to next
+        stage
+    """
+    # alias can be None
+    if isinstance(model, str):
+        raise RuntimeError("Parameter model must be an object not a "
+                           "string '{0}'.".format(model))
+    if any(not isinstance(i, Variable) for i in inputs):
+        raise TypeError(
+            "One input is not a Variable for model %r - %r."
+            "" % (model, inputs))
+    if alias is None:
+        alias = _get_sklearn_operator_name(type(model))
+    this_operator = scope.declare_local_operator(alias, model)
+    this_operator.inputs = inputs
+
+    if hasattr(model, 'onnx_parser'):
+        parser_names = model.onnx_parser()
+        if parser_names is not None:
+            try:
+                names = parser_names(scope=scope, inputs=inputs)
+            except TypeError as e:
+                warnings.warn(
+                    "Calling parser %r for model type %r failed due to %r. "
+                    "This warning will become an exception in version 1.11. "
+                    "The parser signature should be parser(scope=None, "
+                    "inputs=None)." % (
+                        parser_names, type(model), e),
+                    DeprecationWarning)
+                names = parser_names()
+            if names is not None:
+                for name in names:
+                    if isinstance(name, Variable):
+                        this_operator.outputs.append(name)
+                    elif isinstance(name, str):
+                        var = scope.declare_local_variable(
+                            name, guess_tensor_type(inputs[0].type))
+                        this_operator.outputs.append(var)
+                    elif isinstance(name, tuple) and len(name) == 2:
+                        var = scope.declare_local_variable(
+                            name[0], guess_tensor_type(name[1]))
+                        this_operator.outputs.append(var)
+                    else:
+                        raise RuntimeError(
+                            "Unexpected output type %r (value=%r) for "
+                            "operator %r." % (
+                                type(name), name, type(model)))
+                return this_operator.outputs
+
+    if (type(model) in sklearn_classifier_list
+            or isinstance(model, ClassifierMixin)
+            or (isinstance(model, GridSearchCV)
+                and is_classifier(model))):
+        # For classifiers, we may have two outputs, one for label and
+        # the other one for probabilities of all classes. Notice that
+        # their types here are not necessarily correct and they will
+        # be fixed in shape inference phase.
+        label_variable = scope.declare_local_variable(
+            'label', Int64TensorType())
+        if type(model) in [RandomForestClassifier]:
+            prob_dtype = FloatTensorType()
+        else:
+            prob_dtype = guess_tensor_type(inputs[0].type)
+        probability_tensor_variable = scope.declare_local_variable(
+            'probabilities', prob_dtype)
+        this_operator.outputs.append(label_variable)
+        this_operator.outputs.append(probability_tensor_variable)
+
+    elif type(model) in cluster_list or isinstance(model, ClusterMixin):
+        # For clustering, we may have two outputs, one for label and
+        # the other one for scores of all classes. Notice that their
+        # types here are not necessarily correct and they will be fixed
+        # in shape inference phase
+        label_variable = scope.declare_local_variable(
+            'label', Int64TensorType())
+        score_tensor_variable = scope.declare_local_variable(
+            'scores', guess_tensor_type(inputs[0].type))
+        this_operator.outputs.append(label_variable)
+        this_operator.outputs.append(score_tensor_variable)
+
+    elif type(model) in {IsolationForest, LocalOutlierFactor}:
+        label_variable = scope.declare_local_variable(
+            'label', Int64TensorType())
+        score_tensor_variable = scope.declare_local_variable(
+            'scores', guess_tensor_type(inputs[0].type))
+        this_operator.outputs.append(label_variable)
+        this_operator.outputs.append(score_tensor_variable)
+        options = scope.get_options(model, dict(score_samples=False))
+        if options['score_samples']:
+            scores_var = scope.declare_local_variable(
+                'score_samples', guess_tensor_type(inputs[0].type))
+            this_operator.outputs.append(scores_var)
+
+    elif type(model) in outlier_list or isinstance(model, OutlierMixin):
+        # For outliers, we may have two outputs, one for label and
+        # the other one for scores.
+        label_variable = scope.declare_local_variable(
+            'label', Int64TensorType())
+        score_tensor_variable = scope.declare_local_variable(
+            'scores', guess_tensor_type(inputs[0].type))
+        this_operator.outputs.append(label_variable)
+        this_operator.outputs.append(score_tensor_variable)
+
+    elif isinstance(model, NearestNeighbors):
+        # For Nearest Neighbours, we have two outputs, one for nearest
+        # neighbours' indices and the other one for distances
+        index_variable = scope.declare_local_variable(
+            'index', Int64TensorType())
+        distance_variable = scope.declare_local_variable(
+            'distance', guess_tensor_type(inputs[0].type))
+        this_operator.outputs.append(index_variable)
+        this_operator.outputs.append(distance_variable)
+
+    elif type(model) in {GaussianMixture, BayesianGaussianMixture}:
+        label_variable = scope.declare_local_variable(
+            'label', Int64TensorType())
+        prob_variable = scope.declare_local_variable(
+            'probabilities', guess_tensor_type(inputs[0].type))
+        this_operator.outputs.append(label_variable)
+        this_operator.outputs.append(prob_variable)
+        options = scope.get_options(model, dict(score_samples=False))
+        if options['score_samples']:
+            scores_var = scope.declare_local_variable(
+                'score_samples', guess_tensor_type(inputs[0].type))
+            this_operator.outputs.append(scores_var)
+    elif type(model) in {SimpleImputer, Imputer}:
+        if isinstance(inputs[0].type, (Int64TensorType, StringTensorType)):
+            otype = inputs[0].type.__class__()
+        else:
+            otype = guess_tensor_type(inputs[0].type)
+        variable = scope.declare_local_variable('variable', otype)
+        this_operator.outputs.append(variable)
+    else:
+        if hasattr(model, 'get_feature_names_out'):
+            try:
+                out_names = model.get_feature_names_out()
+            except (AttributeError, ValueError):
+                # Catch a bug in scikit-learn.
+                out_names = None
+            this_operator.feature_names_out_ = out_names
+        input_type = guess_tensor_type(inputs[0].type)
+        variable = scope.declare_local_variable(
+            'variable', input_type)
+        this_operator.outputs.append(variable)
+
+    options = scope.get_options(model, dict(decision_path=False), fail=False)
+    if options is not None and options['decision_path']:
+        dec_path = scope.declare_local_variable(
+            'decision_path', StringTensorType())
+        this_operator.outputs.append(dec_path)
+
+    options = scope.get_options(model, dict(decision_leaf=False), fail=False)
+    if options is not None and options['decision_leaf']:
+        dec_path = scope.declare_local_variable(
+            'decision_leaf', Int64TensorType())
+        this_operator.outputs.append(dec_path)
+
+    return this_operator.outputs
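The option handling above (for example score_samples) adds extra graph outputs. A sketch with IsolationForest, assuming that option is registered for this model; data and values are invented:

    import numpy as np
    from sklearn.ensemble import IsolationForest
    from skl2onnx import to_onnx

    X = np.random.rand(50, 3).astype(np.float32)
    iso = IsolationForest(n_estimators=5).fit(X)
    onx = to_onnx(iso, X[:1], options={id(iso): {"score_samples": True}})
    # The graph should now expose label, scores and score_samples outputs.
    print([o.name for o in onx.graph.output])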
+
+
+def _parse_sklearn_pipeline(scope, model, inputs, custom_parsers=None):
+    """
+    The basic ideas of scikit-learn parsing:
+        1. Sequentially go though all stages defined in the considered
+           scikit-learn pipeline
+        2. The output variables of one stage will be fed into its next
+           stage as the inputs.
+
+    :param scope: Scope object defined in _topology.py
+    :param model: scikit-learn pipeline object
+    :param inputs: A list of Variable objects
+    :return: A list of output variables produced by the input pipeline
+    """
+    for step in model.steps:
+        inputs = _parse_sklearn(scope, step[1], inputs,
+                                custom_parsers=custom_parsers)
+    return inputs
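Because each step's outputs feed the next step's inputs, a fitted Pipeline converts as a whole; a small sketch with invented data:

    import numpy as np
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LinearRegression
    from skl2onnx import to_onnx

    X = np.random.rand(30, 2).astype(np.float32)
    y = X[:, 0] * 2.0 + 1.0
    pipe = Pipeline([("scale", StandardScaler()), ("reg", LinearRegression())])
    pipe.fit(X, y)
    onx = to_onnx(pipe, X[:1])  # scaler and regressor are parsed in sequence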
+
+
+def _parse_sklearn_feature_union(scope, model, inputs, custom_parsers=None):
+    """
+    :param scope: Scope object
+    :param model: A scikit-learn FeatureUnion object
+    :param inputs: A list of Variable objects
+    :return: A list of output variables produced by feature union
+    """
+    # Output variable name of each transform. It's a list of string.
+    transformed_result_names = []
+    # Encode each transform as our IR object
+    for name, transform in model.transformer_list:
+        transformed_result_names.append(
+            _parse_sklearn(
+                scope, transform, inputs,
+                custom_parsers=custom_parsers)[0])
+        if (model.transformer_weights is not None and name in
+                model.transformer_weights):
+            transform_result = [transformed_result_names.pop()]
+            # Create a Multiply ONNX node
+            multiply_operator = scope.declare_local_operator('SklearnMultiply')
+            multiply_operator.inputs = transform_result
+            multiply_operator.operand = model.transformer_weights[name]
+            multiply_output = scope.declare_local_variable(
+                'multiply_output', guess_tensor_type(inputs[0].type))
+            multiply_operator.outputs.append(multiply_output)
+            transformed_result_names.append(multiply_operator.outputs[0])
+
+    # Create a Concat ONNX node
+    concat_operator = scope.declare_local_operator('SklearnConcat')
+    concat_operator.inputs = transformed_result_names
+
+    # Declare output name of scikit-learn FeatureUnion
+    union_name = scope.declare_local_variable(
+        'union', guess_tensor_type(inputs[0].type))
+    concat_operator.outputs.append(union_name)
+
+    return concat_operator.outputs
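A sketch of the case handled above: a FeatureUnion with transformer_weights, which the parser turns into per-branch Multiply nodes followed by a Concat (data and weights are arbitrary):

    import numpy as np
    from sklearn.pipeline import FeatureUnion
    from sklearn.decomposition import PCA
    from sklearn.preprocessing import StandardScaler
    from skl2onnx import to_onnx

    X = np.random.rand(30, 4).astype(np.float32)
    union = FeatureUnion(
        [("pca", PCA(n_components=2)), ("scale", StandardScaler())],
        transformer_weights={"pca": 2.0})
    union.fit(X)
    onx = to_onnx(union, X[:1])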
+
+
+def _parse_sklearn_column_transformer(scope, model, inputs,
+                                      custom_parsers=None):
+    """
+    :param scope: Scope object
+    :param model: A *scikit-learn* *ColumnTransformer* object
+    :param inputs: A list of Variable objects
+    :return: A list of output variables produced by column transformer
+    """
+    # Output variable name of each transform. It's a list of string.
+    transformed_result_names = []
+    # Encode each transform as our IR object
+    for name, op, column_indices in model.transformers_:
+        if op == 'drop':
+            continue
+        if isinstance(column_indices, slice):
+            column_indices = list(range(
+                column_indices.start
+                if column_indices.start is not None else 0,
+                column_indices.stop, column_indices.step
+                if column_indices.step is not None else 1))
+        elif isinstance(column_indices, (int, str)):
+            column_indices = [column_indices]
+        names = get_column_indices(column_indices, inputs, multiple=True)
+        transform_inputs = []
+        for onnx_var, onnx_is in names.items():
+            tr_inputs = _fetch_input_slice(scope, [inputs[onnx_var]], onnx_is)
+            transform_inputs.extend(tr_inputs)
+
+        merged_cols = False
+        if len(transform_inputs) > 1:
+            if isinstance(op, Pipeline):
+                if not isinstance(op.steps[0][1], do_not_merge_columns):
+                    merged_cols = True
+            elif not isinstance(op, do_not_merge_columns):
+                merged_cols = True
+
+        if merged_cols:
+            # Many ONNX operators expect one input vector,
+            # the default behaviour is to merge columns.
+            ty = transform_inputs[0].type.__class__([None, None])
+
+            conc_op = scope.declare_local_operator('SklearnConcat')
+            conc_op.inputs = transform_inputs
+            conc_names = scope.declare_local_variable('merged_columns', ty)
+            conc_op.outputs.append(conc_names)
+            transform_inputs = [conc_names]
+
+        model_obj = model.named_transformers_[name]
+        if isinstance(model_obj, str):
+            if model_obj == "passthrough":
+                var_out = transform_inputs[0]
+            elif model_obj == "drop":
+                var_out = None
+            else:
+                raise RuntimeError("Unknown operator alias "
+                                   "'{0}'. These are specified in "
+                                   "_supported_operators.py."
+                                   "".format(model_obj))
+        else:
+            var_out = _parse_sklearn(
+                scope, model_obj,
+                transform_inputs, custom_parsers=custom_parsers)[0]
+            if (model.transformer_weights is not None and name in
+                    model.transformer_weights):
+                # Create a Multiply ONNX node
+                multiply_operator = scope.declare_local_operator(
+                    'SklearnMultiply')
+                multiply_operator.inputs.append(var_out)
+                multiply_operator.operand = model.transformer_weights[name]
+                var_out = scope.declare_local_variable(
+                    'multiply_output', guess_tensor_type(inputs[0].type))
+                multiply_operator.outputs.append(var_out)
+        if var_out:
+            transformed_result_names.append(var_out)
+
+    # Create a Concat ONNX node
+    if len(transformed_result_names) > 1:
+        ty = transformed_result_names[0].type.__class__([None, None])
+        concat_operator = scope.declare_local_operator('SklearnConcat')
+        concat_operator.inputs = transformed_result_names
+
+        # Declare output name of scikit-learn ColumnTransformer
+        transformed_column_name = scope.declare_local_variable(
+            'transformed_column', ty)
+        concat_operator.outputs.append(transformed_column_name)
+        return concat_operator.outputs
+    return transformed_result_names
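The slicing logic above lets a ColumnTransformer read selected columns from a single tensor input and concatenate the branch outputs; a sketch with integer column indices (all values invented):

    import numpy as np
    from sklearn.compose import ColumnTransformer
    from sklearn.preprocessing import StandardScaler, MinMaxScaler
    from skl2onnx import to_onnx

    X = np.random.rand(30, 4).astype(np.float32)
    ct = ColumnTransformer([
        ("std", StandardScaler(), [0, 1]),
        ("minmax", MinMaxScaler(), [2, 3]),
    ])
    ct.fit(X)
    onx = to_onnx(ct, X[:1])  # each branch reads its columns, results are concatenated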
+
+
+def _parse_sklearn_grid_search_cv(scope, model, inputs, custom_parsers=None):
+    options = scope.get_options(model)
+    if options:
+        scope.add_options(id(model.best_estimator_), options)
+    res = parse_sklearn(scope, model.best_estimator_, inputs,
+                        custom_parsers=custom_parsers)
+    scope.replace_raw_operator(
+        model.best_estimator_, model, "SklearnGridSearchCV")
+    return res
+
+
+def _parse_sklearn_random_trees_embedding(scope, model, inputs,
+                                          custom_parsers=None):
+    res = parse_sklearn(scope, model.base_estimator_, inputs,
+                        custom_parsers=custom_parsers)
+    if len(res) != 1:
+        raise RuntimeError(
+            "A regressor only produces one output not %r." % res)
+    scope.replace_raw_operator(
+        model.base_estimator_, model, "SklearnRandomTreesEmbedding")
+    return res
+
+
+def _apply_zipmap(zipmap_options, scope, model, input_type,
+                  probability_tensor):
+    if zipmap_options == 'columns':
+        zipmap_operator = scope.declare_local_operator('SklearnZipMapColumns')
+        classes = get_label_classes(scope, model)
+        classes_names = get_label_classes(scope, model, node_names=True)
+    else:
+        zipmap_operator = scope.declare_local_operator('SklearnZipMap')
+        classes = get_label_classes(scope, model)
+
+    zipmap_operator.inputs = probability_tensor
+    label_type = Int64TensorType([None])
+
+    if (isinstance(model.classes_, list) and
+            isinstance(model.classes_[0], np.ndarray)):
+        # multi-label problem
+        pass
+    elif np.issubdtype(classes.dtype, np.floating):
+        classes = np.array(list(map(lambda x: int(x), classes)))
+        if set(map(lambda x: float(x), classes)) != set(model.classes_):
+            raise RuntimeError("skl2onnx implicitly converts float class "
+                               "labels into integers but at least one label "
+                               "is not an integer. Class labels should "
+                               "be integers or strings.")
+        zipmap_operator.classlabels_int64s = classes
+    elif np.issubdtype(classes.dtype, np.signedinteger):
+        zipmap_operator.classlabels_int64s = [int(i) for i in classes]
+    elif (np.issubdtype(classes.dtype, np.unsignedinteger) or
+            classes.dtype == np.bool_):
+        zipmap_operator.classlabels_int64s = [int(i) for i in classes]
+    else:
+        classes = np.array([s.encode('utf-8') for s in classes])
+        zipmap_operator.classlabels_strings = classes
+        label_type = StringTensorType([None])
+
+    zip_label = scope.declare_local_variable('output_label', label_type)
+    if len(probability_tensor) == 2:
+        zipmap_operator.outputs.append(zip_label)
+
+    if zipmap_options == 'columns':
+        prob_type = probability_tensor[-1].type
+        for cl in classes_names:
+            output_cl = scope.declare_local_variable(cl, prob_type.__class__())
+            zipmap_operator.outputs.append(output_cl)
+    else:
+        zip_probability = scope.declare_local_variable(
+            'output_probability',
+            SequenceType(
+                DictionaryType(
+                    label_type, guess_tensor_type(input_type))))
+        zipmap_operator.outputs.append(zip_probability)
+
+    zipmap_operator.init_status(is_evaluated=True)
+    return zipmap_operator.outputs
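The `zipmap` option chooses how probabilities are exposed: a sequence of dictionaries (default), a plain tensor (`False`), or one output per class (`'columns'`). A short sketch passing the option per model instance (the classifier is only an example):

    import numpy
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    from skl2onnx import to_onnx

    X, y = load_iris(return_X_y=True)
    clf = LogisticRegression(max_iter=500).fit(X, y)
    onx = to_onnx(clf, X[:1].astype(numpy.float32),
                  options={id(clf): {"zipmap": False}})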
+
+
+def _parse_sklearn_classifier(scope, model, inputs, custom_parsers=None):
+    options = scope.get_options(model, dict(zipmap=True))
+    no_zipmap = (
+        (isinstance(options['zipmap'], bool) and not options['zipmap']) or
+        (model.__class__ in [NuSVC, SVC] and not model.probability))
+    probability_tensor = _parse_sklearn_simple_model(
+        scope, model, inputs, custom_parsers=custom_parsers)
+
+    if no_zipmap:
+        if options.get('output_class_labels', False):
+            if not hasattr(model, "classes_"):
+                raise RuntimeError(
+                    "Model type %r has no attribute 'classes_'. "
+                    "Option 'output_class_labels' is invalid or a new parser "
+                    "must be used." % model.__class__.__name__)
+
+            clout = scope.declare_local_operator('SklearnClassLabels')
+            clout.classes = get_label_classes(scope, model)
+            if model.classes_.dtype in (np.int32, np.int64, np.bool_):
+                ctype = Int64TensorType
+            else:
+                ctype = StringTensorType
+            label_type = ctype(clout.classes.shape)
+            class_labels = scope.declare_local_variable(
+                'class_labels', label_type)
+            clout.outputs.append(class_labels)
+            outputs = list(probability_tensor)
+            outputs.append(class_labels)
+            return outputs
+        return probability_tensor
+
+    if options.get('output_class_labels', False):
+        raise RuntimeError(
+            "Option 'output_class_labels' is not compatible with option "
+            "'zipmap'.")
+
+    return _apply_zipmap(
+        options['zipmap'], scope, model, inputs[0].type, probability_tensor)
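As enforced above, `output_class_labels` adds the classes as an extra output and can only be combined with `zipmap=False`. A hedged sketch, reusing `clf` and `X` from the previous example:

    onx = to_onnx(clf, X[:1].astype(numpy.float32),
                  options={id(clf): {"zipmap": False,
                                     "output_class_labels": True}})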
+
+
+def _parse_sklearn_multi_output_classifier(scope, model, inputs,
+                                           custom_parsers=None):
+    options = scope.get_options(model, dict(zipmap=True))
+    if options['zipmap']:
+        warnings.warn(
+            "Option zipmap is ignored for model %r. "
+            "Set option zipmap to False to "
+            "remove this message." % type(model),
+            UserWarning)
+    alias = _get_sklearn_operator_name(type(model))
+    this_operator = scope.declare_local_operator(alias, model)
+    this_operator.inputs = inputs
+
+    if hasattr(model, 'classes_'):
+        classes = model.classes_
+    else:
+        classes = [get_label_classes(scope, m) for m in model.estimators_]
+    if len(set(cl.dtype for cl in classes)) != 1:
+        raise RuntimeError(
+            "Class labels may have only one type %r."
+            "" % set(cl.dtype for cl in classes))
+    if classes[0].dtype in (np.int32, np.int64, np.bool_):
+        ctype = Int64TensorType
+    else:
+        ctype = StringTensorType
+
+    label = scope.declare_local_variable("label", ctype())
+    proba = scope.declare_local_variable(
+        "probabilities", SequenceType(guess_tensor_type(inputs[0].type)))
+    this_operator.outputs.append(label)
+    this_operator.outputs.append(proba)
+
+    options = scope.get_options(model)
+    if options.get('output_class_labels', False):
+        clout = scope.declare_local_operator('SklearnClassLabels')
+        clout.is_multi_output = True
+        clout.classes = classes
+        class_labels = scope.declare_local_variable(
+            "class_labels",
+            SequenceType(ctype()))
+        clout.outputs.append(class_labels)
+        return list(this_operator.outputs) + [class_labels]
+
+    return this_operator.outputs
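For `MultiOutputClassifier` the probabilities come back as a sequence of tensors and `zipmap` is ignored, hence the warning above. An illustrative sketch that disables the option to silence the warning (dataset and estimator are arbitrary choices):

    import numpy
    from sklearn.datasets import make_multilabel_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.multioutput import MultiOutputClassifier
    from skl2onnx import to_onnx

    X, Y = make_multilabel_classification(n_classes=3, random_state=0)
    clf = MultiOutputClassifier(LogisticRegression(max_iter=500)).fit(X, Y)
    onx = to_onnx(clf, X[:1].astype(numpy.float32),
                  options={id(clf): {"zipmap": False}})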
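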
+
+
+def _parse_sklearn_gaussian_process(scope, model, inputs, custom_parsers=None):
+    options = scope.get_options(
+        model, dict(return_cov=False, return_std=False))
+    if options['return_std'] and options['return_cov']:
+        raise RuntimeError(
+            "Not returning standard deviation of predictions when "
+            "returning full covariance.")
+
+    alias = _get_sklearn_operator_name(type(model))
+    this_operator = scope.declare_local_operator(alias, model)
+    mean_tensor = scope.declare_local_variable(
+        "GPmean", guess_tensor_type(inputs[0].type))
+    this_operator.inputs = inputs
+    this_operator.outputs.append(mean_tensor)
+
+    if options['return_std'] or options['return_cov']:
+        # covariance or standard deviation
+        covstd_tensor = scope.declare_local_variable(
+            'GPcovstd', guess_tensor_type(inputs[0].type))
+        this_operator.outputs.append(covstd_tensor)
+    return this_operator.outputs
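The `return_std` / `return_cov` options add a second output to the Gaussian process regressor. A hedged sketch, assuming double precision input for numerical accuracy and using the model class as the option key:

    import numpy
    from sklearn.datasets import load_diabetes
    from sklearn.gaussian_process import GaussianProcessRegressor
    from skl2onnx import to_onnx

    X, y = load_diabetes(return_X_y=True)
    gpr = GaussianProcessRegressor().fit(X, y)
    # assumption: calling predict once with return_std=True populates the
    # cached attributes some converters rely on; it is harmless in any case
    gpr.predict(X[:1], return_std=True)
    onx = to_onnx(gpr, X[:1].astype(numpy.float64),
                  options={GaussianProcessRegressor: {"return_std": True}})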
+
+
+def _parse_sklearn_bayesian_ridge(scope, model, inputs, custom_parsers=None):
+    options = scope.get_options(model, dict(return_std=False))
+    alias = _get_sklearn_operator_name(type(model))
+    this_operator = scope.declare_local_operator(alias, model)
+    mean_tensor = scope.declare_local_variable(
+        "variable", guess_tensor_type(inputs[0].type))
+    this_operator.inputs = inputs
+    this_operator.outputs.append(mean_tensor)
+
+    if options['return_std']:
+        # covariance or standard deviation
+        covstd_tensor = scope.declare_local_variable(
+            'std', guess_tensor_type(inputs[0].type))
+        this_operator.outputs.append(covstd_tensor)
+    return this_operator.outputs
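`BayesianRidge` supports the same `return_std` option; a short sketch reusing `X` and `y` from the previous example:

    from sklearn.linear_model import BayesianRidge

    br = BayesianRidge().fit(X, y)
    onx = to_onnx(br, X[:1].astype(numpy.float64),
                  options={BayesianRidge: {"return_std": True}})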
+
+
+def _parse_sklearn(scope, model, inputs, custom_parsers=None, alias=None):
+    """
+    This is a delegate function. It does nothing but invoke the
+    correct parsing function according to the input model's type.
+
+    :param scope: Scope object
+    :param model: A scikit-learn object (e.g., OneHotEncoder
+        and LogisticRegression)
+    :param inputs: A list of variables
+    :param custom_parsers: parsers determine which outputs are expected
+        for which particular task; default parsers are defined for
+        classifiers, regressors and pipelines, but they can be rewritten,
+        *custom_parsers* is a dictionary ``{ type: fct_parser(scope,
+        model, inputs, custom_parsers=None) }``
+    :param alias: alias of the model (None if based on the model class)
+    :return: The output variables produced by the input model
+    """
+    for i, inp in enumerate(inputs):
+        if not isinstance(inp, Variable):
+            raise TypeError(
+                "Unexpected input type %r for input %r: %r." % (
+                    type(inp), i, inp))
+
+    if alias is not None:
+        outputs = _parse_sklearn_simple_model(scope, model, inputs,
+                                              custom_parsers=custom_parsers,
+                                              alias=alias)
+        return outputs
+
+    tmodel = type(model)
+    if custom_parsers is not None and tmodel in custom_parsers:
+        outputs = custom_parsers[tmodel](scope, model, inputs,
+                                         custom_parsers=custom_parsers)
+    elif tmodel in sklearn_parsers_map:
+        outputs = sklearn_parsers_map[tmodel](scope, model, inputs,
+                                              custom_parsers=custom_parsers)
+    elif isinstance(model, pipeline.Pipeline):
+        parser = sklearn_parsers_map[pipeline.Pipeline]
+        outputs = parser(scope, model, inputs, custom_parsers=custom_parsers)
+    else:
+        outputs = _parse_sklearn_simple_model(scope, model, inputs,
+                                              custom_parsers=custom_parsers)
+    return outputs
+
+
+
[docs]def parse_sklearn(scope, model, inputs, custom_parsers=None, final_types=None):
+    """
+    This is a delegate function. It does nothing but invoke the
+    correct parsing function according to the input model's type.
+
+    :param scope: Scope object
+    :param model: A scikit-learn object (e.g., OneHotEncoder
+        and LogisticRegression)
+    :param inputs: A list of variables
+    :param custom_parsers: parsers determine which outputs are expected
+        for which particular task; default parsers are defined for
+        classifiers, regressors and pipelines, but they can be rewritten,
+        *custom_parsers* is a dictionary ``{ type: fct_parser(scope,
+        model, inputs, custom_parsers=None) }``
+    :param final_types: a python list. Works the same way as initial_types
+        but is not mandatory, it is used to overwrite the type
+        (if type is not None) and the name of every output.
+    :return: The output variables produced by the input model
+    """
+    if final_types is not None:
+        outputs = []
+        for name, ty in final_types:
+            var = scope.declare_local_output(name, ty, missing_type=True)
+            if var.onnx_name != name:
+                raise RuntimeError(
+                    "Unable to add duplicated output '{}', '{}'. "
+                    "Output and input must have different names."
+                    "".format(var.onnx_name, name))
+            outputs.append(var)
+
+        hidden_outputs = _parse_sklearn(
+            scope, model, inputs, custom_parsers=custom_parsers)
+
+        if len(hidden_outputs) != len(outputs):
+            raise RuntimeError(
+                "Number of declared outputs is unexpected, declared '{}' "
+                "found '{}'.".format(
+                    ", ".join(_.onnx_name for _ in outputs),
+                    ", ".join(_.onnx_name for _ in hidden_outputs)))
+        for h, o in zip(hidden_outputs, outputs):
+            if o.type is None:
+                iop = scope.declare_local_operator('SklearnIdentity')
+            else:
+                iop = scope.declare_local_operator('SklearnCast')
+            iop.inputs = [h]
+            iop.outputs = [o]
+            h.init_status(is_leaf=False)
+            o.init_status(is_leaf=True)
+            if o.type is None and h.type is not None:
+                o.type = h.type
+        return outputs
+
+    res = _parse_sklearn(
+        scope, model, inputs, custom_parsers=custom_parsers)
+    for r in res:
+        r.init_status(is_leaf=True)
+    return res
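A hedged sketch of `final_types` as exposed through `to_onnx`: it renames (and, when a type is given, retypes) the outputs declared by the parser. Names and shapes below are illustrative:

    import numpy
    from sklearn.datasets import load_diabetes
    from sklearn.linear_model import LinearRegression
    from skl2onnx import to_onnx
    from skl2onnx.common.data_types import FloatTensorType

    X, y = load_diabetes(return_X_y=True)
    reg = LinearRegression().fit(X, y)
    onx = to_onnx(reg, X[:1].astype(numpy.float32),
                  final_types=[("prediction", FloatTensorType([None, 1]))])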
+
+
+
[docs]def parse_sklearn_model(model, initial_types=None, target_opset=None,
+                        custom_conversion_functions=None,
+                        custom_shape_calculators=None,
+                        custom_parsers=None,
+                        options=None, white_op=None,
+                        black_op=None, final_types=None,
+                        naming=None):
+    """
+    Puts a *scikit-learn* object into an abstract container so that
+    our framework can work seamlessly on models created
+    with different machine learning tools.
+
+    :param model: A scikit-learn model
+    :param initial_types: a python list. Each element is a tuple of a
+        variable name and a type defined in data_types.py
+    :param target_opset: number, for example, 7 for ONNX 1.2,
+        and 8 for ONNX 1.3.
+    :param custom_conversion_functions: a dictionary for specifying
+        the user customized conversion function if not registered
+    :param custom_shape_calculators: a dictionary for specifying the
+        user customized shape calculator if not registered
+    :param custom_parsers: parsers determine which outputs are expected
+        for which particular task; default parsers are defined for
+        classifiers, regressors and pipelines, but they can be rewritten,
+        *custom_parsers* is a dictionary
+        ``{ type: fct_parser(scope, model, inputs, custom_parsers=None) }``
+    :param options: specific options given to converters
+        (see :ref:`l-conv-options`)
+    :param white_op: white list of ONNX nodes allowed
+        while converting a pipeline, if empty, all are allowed
+    :param black_op: black list of ONNX nodes disallowed
+        while converting a pipeline, if empty, none are blacklisted
+    :param final_types: a python list. Works the same way as initial_types
+        but is not mandatory, it is used to overwrite the type
+        (if type is not None) and the name of every output.
+    :param naming: the user may want to change the way intermediate
+        variables are named, this parameter can be a string (a prefix) or a
+        function, whose signature is the following:
+        `get_name(name, existing_names)`, the library will then
+        check this name is unique and modify it if not
+    :return: :class:`Topology <skl2onnx.common._topology.Topology>`
+
+    .. versionchanged:: 1.10.0
+        Parameter *naming* was added.
+    """
+    options = _process_options(model, options)
+
+    raw_model_container = SklearnModelContainerNode(
+        model, white_op=white_op, black_op=black_op)
+
+    # Declare a computational graph. It will become a representation of
+    # the input scikit-learn model after parsing.
+    topology = Topology(
+        raw_model_container, initial_types=initial_types,
+        target_opset=target_opset,
+        custom_conversion_functions=custom_conversion_functions,
+        custom_shape_calculators=custom_shape_calculators,
+        registered_models=dict(
+            conv=_converter_pool, shape=_shape_calculator_pool,
+            aliases=sklearn_operator_name_map))
+
+    # Declare an object to provide variables' and operators' naming mechanism.
+    scope = topology.declare_scope('__root__', options=options, naming=naming)
+    inputs = scope.input_variables
+
+    # The object raw_model_container is a part of the topology
+    # we're going to return. We use it to store the inputs of
+    # the scikit-learn's computational graph.
+    for variable in inputs:
+        variable.init_status(is_root=True)
+        raw_model_container.add_input(variable)
+
+    # Parse the input scikit-learn model as a Topology object.
+    outputs = parse_sklearn(scope, model, inputs,
+                            custom_parsers=custom_parsers,
+                            final_types=final_types)
+
+    # The object raw_model_container is a part of the topology we're
+    # going to return. We use it to store the outputs of the
+    # scikit-learn's computational graph.
+    if final_types is not None and len(final_types) != len(outputs):
+        raise RuntimeError(
+            "Unexpected number of outputs, expected %d, got %d "
+            "after parsing." % (len(final_types), len(outputs)))
+    return topology
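The returned `Topology` is not yet an ONNX model; `convert_sklearn` (or `to_onnx`) runs both parsing and conversion. A minimal sketch of calling the parser directly, with an illustrative input name:

    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    from skl2onnx._parse import parse_sklearn_model
    from skl2onnx.common.data_types import FloatTensorType

    X, y = load_iris(return_X_y=True)
    clf = LogisticRegression(max_iter=500).fit(X, y)
    topology = parse_sklearn_model(
        clf, initial_types=[("X", FloatTensorType([None, X.shape[1]]))])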
+
+
+def build_sklearn_parsers_map():
+    map_parser = {
+        pipeline.Pipeline: _parse_sklearn_pipeline,
+        pipeline.FeatureUnion: _parse_sklearn_feature_union,
+        BayesianRidge: _parse_sklearn_bayesian_ridge,
+        GaussianProcessRegressor: _parse_sklearn_gaussian_process,
+        GridSearchCV: _parse_sklearn_grid_search_cv,
+        MultiOutputClassifier: _parse_sklearn_multi_output_classifier,
+        RandomTreesEmbedding: _parse_sklearn_random_trees_embedding,
+    }
+    if ColumnTransformer is not None:
+        map_parser[ColumnTransformer] = _parse_sklearn_column_transformer
+
+    for tmodel in sklearn_classifier_list:
+        if tmodel not in [LinearSVC]:
+            map_parser[tmodel] = _parse_sklearn_classifier
+    return map_parser
+
+
[docs]def update_registered_parser(model, parser_fct):
+    """
+    Registers or updates a parser for a new model.
+    A parser returns the expected output of a model.
+
+    :param model: model class
+    :param parser_fct: parser, signature is the same as
+        :func:`parse_sklearn <skl2onnx._parse.parse_sklearn>`
+    """
+    check_signature(parser_fct, _parse_sklearn_classifier)
+    sklearn_parsers_map[model] = parser_fct
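A hedged sketch: reuse the default classifier parser for a custom classifier so it gets the usual label/probability outputs and `zipmap` handling. `MyClassifier` is hypothetical and would still need a registered converter and shape calculator:

    from sklearn.linear_model import LogisticRegression
    from skl2onnx._parse import (
        _parse_sklearn_classifier, update_registered_parser)

    class MyClassifier(LogisticRegression):
        # hypothetical custom classifier used only for illustration
        pass

    update_registered_parser(MyClassifier, _parse_sklearn_classifier)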
+
+
+# registered parsers
+sklearn_parsers_map = build_sklearn_parsers_map()
+
\ No newline at end of file
diff --git a/_modules/skl2onnx/_supported_operators.html b/_modules/skl2onnx/_supported_operators.html
index 55d1a4de2..54ee96406 100644
--- a/_modules/skl2onnx/_supported_operators.html
+++ b/_modules/skl2onnx/_supported_operators.html
@@ -1,774 +1,879 @@
-  skl2onnx._supported_operators — sklearn-onnx 1.11.2 documentation

Source code for skl2onnx._supported_operators

-# SPDX-License-Identifier: Apache-2.0
-
-
-import warnings
-import logging
-
-# Calibrated classifier CV
-from sklearn.calibration import CalibratedClassifierCV
-
-# Linear classifiers
-from sklearn.linear_model import (
-    LogisticRegression, LogisticRegressionCV,
-    PassiveAggressiveClassifier,
-    Perceptron,
-    RidgeClassifier, RidgeClassifierCV,
-    SGDClassifier
-)
-from sklearn.svm import LinearSVC, OneClassSVM
-
-# Linear regressors
-from sklearn.linear_model import (
-    ARDRegression,
-    BayesianRidge,
-    ElasticNet, ElasticNetCV,
-    HuberRegressor,
-    Lars, LarsCV,
-    Lasso, LassoCV,
-    LassoLars, LassoLarsCV,
-    LassoLarsIC,
-    LinearRegression,
-    MultiTaskElasticNet, MultiTaskElasticNetCV,
-    MultiTaskLasso, MultiTaskLassoCV,
-    OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV,
-    PassiveAggressiveRegressor,
-    RANSACRegressor,
-    Ridge, RidgeCV,
-    SGDRegressor,
-    TheilSenRegressor
-)
-try:
-    from sklearn.linear_model import QuantileRegressor
-except ImportError:
-    # available since sklearn>=1.0
-    QuantileRegressor = None
-try:
-    from sklearn.linear_model import PoissonRegressor
-except ImportError:
-    # available since sklearn>=0.23
-    PoissonRegressor = None
-try:
-    from sklearn.linear_model import TweedieRegressor
-except ImportError:
-    # available since sklearn>=0.23
-    TweedieRegressor = None
-try:
-    from sklearn.linear_model import SGDOneClassSVM
-except ImportError:
-    # available since sklearn>=1.0
-    SGDOneClassSVM = None
-
-from sklearn.svm import LinearSVR
-from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
-
-# Mixture
-from sklearn.mixture import (
-    GaussianMixture, BayesianGaussianMixture
-)
-
-# Multi-class
-from sklearn.multiclass import OneVsRestClassifier
-
-# Tree-based models
-from sklearn.ensemble import (
-    AdaBoostClassifier, AdaBoostRegressor,
-    BaggingClassifier, BaggingRegressor,
-    ExtraTreesClassifier, ExtraTreesRegressor,
-    GradientBoostingClassifier, GradientBoostingRegressor,
-    IsolationForest,
-    RandomForestClassifier, RandomForestRegressor, RandomTreesEmbedding,
-    VotingClassifier
-)
-try:
-    from sklearn.ensemble import VotingRegressor
-except ImportError:
-    # New in 0.21
-    VotingRegressor = None
-try:
-    from sklearn.ensemble import StackingClassifier, StackingRegressor
-except ImportError:
-    # New in 0.22
-    StackingClassifier = None
-    StackingRegressor = None
-from sklearn.tree import (
-    DecisionTreeClassifier, DecisionTreeRegressor,
-    ExtraTreeClassifier, ExtraTreeRegressor
-)
-
-# Gaussian processes
-from sklearn.gaussian_process import (
-    GaussianProcessClassifier, GaussianProcessRegressor
-)
-
-# GridSearchCV
-from sklearn.model_selection import GridSearchCV
-
-# MultiOutput
-from sklearn.multioutput import MultiOutputClassifier, MultiOutputRegressor
-
-# Support vector machines
-from sklearn.svm import NuSVC, NuSVR, SVC, SVR
-
-# K-nearest neighbors
-from sklearn.neighbors import (
-    KNeighborsClassifier,
-    KNeighborsRegressor,
-    LocalOutlierFactor,
-    NearestNeighbors,
-    RadiusNeighborsClassifier,
-    RadiusNeighborsRegressor,
-)
-try:
-    from sklearn.neighbors import (
-        KNeighborsTransformer,
-        NeighborhoodComponentsAnalysis,
-    )
-except ImportError:
-    # New in 0.22
-    KNeighborsTransformer = None
-    NeighborhoodComponentsAnalysis = None
-
-# Naive Bayes
-from sklearn.naive_bayes import (
-    BernoulliNB,
-    GaussianNB,
-    MultinomialNB,
-)
-try:
-    from sklearn.naive_bayes import CategoricalNB
-except ImportError:
-    # scikit-learn versions <= 0.21
-    CategoricalNB = None
-try:
-    from sklearn.naive_bayes import ComplementNB
-except ImportError:
-    # scikit-learn versions <= 0.19
-    ComplementNB = None
-
-# Neural Networks
-from sklearn.neural_network import MLPClassifier, MLPRegressor
-
-# Clustering
-from sklearn.cluster import KMeans, MiniBatchKMeans
-
-# Operators for preprocessing and feature engineering
-from sklearn.cross_decomposition import PLSRegression
-from sklearn.decomposition import (
-    KernelPCA,
-    IncrementalPCA,
-    PCA,
-    TruncatedSVD,
-)
-from sklearn.feature_extraction import DictVectorizer
-from sklearn.feature_extraction.text import (
-    CountVectorizer, TfidfTransformer, TfidfVectorizer
-)
-from sklearn.feature_selection import (
-    GenericUnivariateSelect, RFE, RFECV,
-    SelectFdr, SelectFpr, SelectFromModel,
-    SelectFwe, SelectKBest, SelectPercentile,
-    VarianceThreshold
-)
-try:
-    # 0.20
-    from sklearn.impute import SimpleImputer
-except ImportError:
-    # 0.19
-    from sklearn.preprocessing import Imputer as SimpleImputer
-from sklearn.preprocessing import Binarizer
-try:
-    from sklearn.preprocessing import Imputer
-except ImportError:
-    # removed in 0.21
-    Imputer = None
-try:
-    from sklearn.impute import KNNImputer
-except ImportError:
-    # New in 0.22
-    KNNImputer = None
-try:
-    from sklearn.preprocessing import KBinsDiscretizer
-except ImportError:
-    # not available in 0.19
-    KBinsDiscretizer = None
-from sklearn.preprocessing import (
-    LabelBinarizer, LabelEncoder,
-    Normalizer, OneHotEncoder
-)
-try:
-    from sklearn.preprocessing import OrdinalEncoder
-except ImportError:
-    # Not available in scikit-learn < 0.20.0
-    OrdinalEncoder = None
-from sklearn.preprocessing import (
-    FunctionTransformer,
-    KernelCenterer,
-    MaxAbsScaler,
-    MinMaxScaler,
-    PolynomialFeatures,
-    RobustScaler,
-    StandardScaler
-)
-
-try:
-    from sklearn.preprocessing import PowerTransformer
-except ImportError:
-    # Not available in scikit-learn < 0.20.0
-    PowerTransformer = None
-
-try:
-    from sklearn.ensemble import (
-        HistGradientBoostingClassifier,
-        HistGradientBoostingRegressor
-    )
-except ImportError:
-    # Second verification as these models still require
-    # manual activation.
-    try:
-        from sklearn.ensemble._hist_gradient_boosting.gradient_boosting import (  # noqa
-            HistGradientBoostingClassifier,
-            HistGradientBoostingRegressor
-        )
-    except ImportError:
-        HistGradientBoostingRegressor = None
-        HistGradientBoostingClassifier = None
-
-from sklearn.random_projection import GaussianRandomProjection
-
-try:
-    from sklearn.compose import ColumnTransformer
-except ImportError:
-    # ColumnTransformer was introduced in 0.20.
-    ColumnTransformer = None
-
-from sklearn.pipeline import Pipeline, FeatureUnion
-
-# Custom extension
-from .sklapi import CastRegressor, CastTransformer, ReplaceTransformer
-
-from .common._registration import register_converter, register_shape_calculator
-
-logger = logging.getLogger('skl2onnx')
-
-# In most cases, scikit-learn operator produces only one output.
-# However, each classifier has basically two outputs; one is the
-# predicted label and the other one is the probabilities of all
-# possible labels. Here is a list of supported scikit-learn
-# classifiers. In the parsing stage, we produce two outputs for objects
-# included in the following list and one output for everything not in
-# the list.
-sklearn_classifier_list = list(filter(lambda m: m is not None, [
-    AdaBoostClassifier,
-    BaggingClassifier,
-    BernoulliNB,
-    CategoricalNB,
-    CalibratedClassifierCV,
-    ComplementNB,
-    DecisionTreeClassifier,
-    ExtraTreeClassifier,
-    ExtraTreesClassifier,
-    GaussianNB,
-    GaussianProcessClassifier,
-    GradientBoostingClassifier,
-    HistGradientBoostingClassifier,
-    KNeighborsClassifier,
-    LinearDiscriminantAnalysis,
-    LinearSVC,
-    LogisticRegression,
-    LogisticRegressionCV,
-    MLPClassifier,
-    MultinomialNB,
-    NuSVC,
-    OneVsRestClassifier,
-    PassiveAggressiveClassifier,
-    Perceptron,
-    RandomForestClassifier,
-    SGDClassifier,
-    StackingClassifier,
-    SVC,
-    VotingClassifier,
-]))
-
-# Clustering algorithms: produces two outputs, label and score for
-# each cluster in most cases.
-cluster_list = [KMeans, MiniBatchKMeans]
-
-# Outlier detection algorithms:
-# produces two outputs, label and scores
-outlier_list = [IsolationForest, LocalOutlierFactor, OneClassSVM]
-
-
-# Associate scikit-learn types with our operator names. If two
-# scikit-learn models share a single name, it means their are
-# equivalent in terms of conversion.
-def build_sklearn_operator_name_map():
-    res = {k: "Sklearn" + k.__name__ for k in [
-        AdaBoostClassifier,
-        AdaBoostRegressor,
-        BaggingClassifier,
-        BaggingRegressor,
-        BayesianGaussianMixture,
-        BayesianRidge,
-        BernoulliNB,
-        Binarizer,
-        CalibratedClassifierCV,
-        CategoricalNB,
-        CastRegressor,
-        CastTransformer,
-        ColumnTransformer,
-        ComplementNB,
-        CountVectorizer,
-        DictVectorizer,
-        GaussianNB,
-        DecisionTreeClassifier,
-        DecisionTreeRegressor,
-        ExtraTreeClassifier,
-        ExtraTreeRegressor,
-        ExtraTreesClassifier,
-        ExtraTreesRegressor,
-        FeatureUnion,
-        FunctionTransformer,
-        GaussianMixture,
-        GaussianProcessClassifier,
-        GaussianProcessRegressor,
-        GaussianRandomProjection,
-        GenericUnivariateSelect,
-        GradientBoostingClassifier,
-        GradientBoostingRegressor,
-        HistGradientBoostingClassifier,
-        HistGradientBoostingRegressor,
-        Imputer,
-        IncrementalPCA,
-        IsolationForest,
-        KMeans,
-        LabelBinarizer,
-        LabelEncoder,
-        LinearRegression,
-        LinearSVC,
-        LinearSVR,
-        LocalOutlierFactor,
-        MaxAbsScaler,
-        MiniBatchKMeans,
-        MinMaxScaler,
-        MLPClassifier,
-        MLPRegressor,
-        MultinomialNB,
-        MultiOutputClassifier,
-        MultiOutputRegressor,
-        KBinsDiscretizer,
-        KernelCenterer,
-        KernelPCA,
-        KNeighborsClassifier,
-        KNeighborsRegressor,
-        KNeighborsTransformer,
-        KNNImputer,
-        NearestNeighbors,
-        NeighborhoodComponentsAnalysis,
-        Normalizer,
-        OneClassSVM,
-        OneHotEncoder,
-        OneVsRestClassifier,
-        OrdinalEncoder,
-        PCA,
-        PLSRegression,
-        Pipeline,
-        PoissonRegressor,
-        PolynomialFeatures,
-        PowerTransformer,
-        RadiusNeighborsClassifier,
-        RadiusNeighborsRegressor,
-        RandomForestClassifier,
-        RandomForestRegressor,
-        RandomTreesEmbedding,
-        RANSACRegressor,
-        ReplaceTransformer,
-        RFE,
-        RFECV,
-        RobustScaler,
-        SelectFdr,
-        SelectFpr,
-        SelectFromModel,
-        SelectFwe,
-        SelectKBest,
-        SelectPercentile,
-        SGDClassifier,
-        SGDOneClassSVM,
-        SimpleImputer,
-        StackingClassifier,
-        StackingRegressor,
-        SVC,
-        SVR,
-        TfidfVectorizer,
-        TfidfTransformer,
-        TruncatedSVD,
-        TweedieRegressor,
-        VarianceThreshold,
-        VotingClassifier,
-        VotingRegressor,
-    ] if k is not None}
-    res.update({
-        ARDRegression: 'SklearnLinearRegressor',
-        ElasticNet: 'SklearnLinearRegressor',
-        ElasticNetCV: 'SklearnLinearRegressor',
-        GridSearchCV: 'SklearnGridSearchCV',
-        HuberRegressor: 'SklearnLinearRegressor',
-        LinearRegression: 'SklearnLinearRegressor',
-        Lars: 'SklearnLinearRegressor',
-        LarsCV: 'SklearnLinearRegressor',
-        Lasso: 'SklearnLinearRegressor',
-        LassoCV: 'SklearnLinearRegressor',
-        LassoLars: 'SklearnLinearRegressor',
-        LassoLarsCV: 'SklearnLinearRegressor',
-        LassoLarsIC: 'SklearnLinearRegressor',
-        LinearDiscriminantAnalysis: 'SklearnLinearClassifier',
-        LogisticRegression: 'SklearnLinearClassifier',
-        LogisticRegressionCV: 'SklearnLinearClassifier',
-        MultiTaskElasticNet: 'SklearnLinearRegressor',
-        MultiTaskElasticNetCV: 'SklearnLinearRegressor',
-        MultiTaskLasso: 'SklearnLinearRegressor',
-        MultiTaskLassoCV: 'SklearnLinearRegressor',
-        NuSVC: 'SklearnSVC',
-        NuSVR: 'SklearnSVR',
-        OrthogonalMatchingPursuit: 'SklearnLinearRegressor',
-        OrthogonalMatchingPursuitCV: 'SklearnLinearRegressor',
-        PassiveAggressiveClassifier: 'SklearnSGDClassifier',
-        PassiveAggressiveRegressor: 'SklearnLinearRegressor',
-        Perceptron: 'SklearnSGDClassifier',
-        QuantileRegressor: 'SklearnLinearRegressor',
-        Ridge: 'SklearnLinearRegressor',
-        RidgeCV: 'SklearnLinearRegressor',
-        RidgeClassifier: 'SklearnLinearClassifier',
-        RidgeClassifierCV: 'SklearnLinearClassifier',
-        SGDRegressor: 'SklearnLinearRegressor',
-        StandardScaler: 'SklearnScaler',
-        TheilSenRegressor: 'SklearnLinearRegressor',
-    })
-    if None in res:
-        del res[None]
-    return res
-
-
-
[docs]def update_registered_converter(model, alias, shape_fct, convert_fct, - overwrite=True, parser=None, options=None): - """ - Registers or updates a converter for a new model so that - it can be converted when inserted in a *scikit-learn* pipeline. - - :param model: model class - :param alias: alias used to register the model - :param shape_fct: function which checks or modifies the expected - outputs, this function should be fast so that the whole graph - can be computed followed by the conversion of each model, - parallelized or not - :param convert_fct: function which converts a model - :param overwrite: False to raise exception if a converter - already exists - :param parser: overwrites the parser as well if not empty - :param options: registered options for this converter - - The alias is usually the library name followed by the model name. - Example: - - :: - - from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes - from skl2onnx.operator_converters.RandomForest import convert_sklearn_random_forest_classifier - from skl2onnx import update_registered_converter - update_registered_converter( - SGDClassifier, 'SklearnLinearClassifier', - calculate_linear_classifier_output_shapes, - convert_sklearn_random_forest_classifier, - options={'zipmap': [True, False, 'columns'], - 'output_class_labels': [False, True], - 'raw_scores': [True, False]}) - - The function does not update the parser if not specified except if - option `'zipmap'` is added to the list. Every classifier - must declare this option to let the default parser - automatically handle that option. - """ # noqa - if (not overwrite and model in sklearn_operator_name_map - and alias != sklearn_operator_name_map[model]): - warnings.warn("Model '{0}' was already registered under alias " - "'{1}'.".format(model, sklearn_operator_name_map[model])) - sklearn_operator_name_map[model] = alias - register_converter(alias, convert_fct, overwrite=overwrite, - options=options) - register_shape_calculator(alias, shape_fct, overwrite=overwrite) - if parser is not None: - from ._parse import update_registered_parser - update_registered_parser(model, parser) - elif (options is not None and - ('zipmap' in options or 'output_class_labels' in options)): - from ._parse import ( - _parse_sklearn_classifier, update_registered_parser) - update_registered_parser(model, _parse_sklearn_classifier)
- - -def _get_sklearn_operator_name(model_type): - """ - Get operator name of the input argument - - :param model_type: A scikit-learn object (e.g., SGDClassifier - and Binarizer) - :return: A string which stands for the type of the input model in - our conversion framework - """ - if model_type not in sklearn_operator_name_map: - # No proper operator name found, it means a local operator. - alias = None - else: - alias = sklearn_operator_name_map[model_type] - logger.debug('[parsing] found alias=%r for type=%r.', alias, model_type) - return alias - - -def get_model_alias(model_type): - """ - Get alias model. Raise an exception if not found. - - :param model_type: A scikit-learn object (e.g., SGDClassifier - and Binarizer) - :return: A string which stands for the type of the input model in - our conversion framework - """ - res = _get_sklearn_operator_name(model_type) - if res is None: - raise RuntimeError("Unable to find alias for model '{}'. " - "The converter is likely missing." - "".format(model_type)) - return res - - -# registered converters -sklearn_operator_name_map = build_sklearn_operator_name_map() -
+  skl2onnx._supported_operators - sklearn-onnx 1.14.0 documentation

Source code for skl2onnx._supported_operators

+# SPDX-License-Identifier: Apache-2.0
+
+
+import warnings
+import logging
+
+# Calibrated classifier CV
+from sklearn.calibration import CalibratedClassifierCV
+
+# Linear classifiers
+from sklearn.linear_model import (
+    LogisticRegression, LogisticRegressionCV,
+    PassiveAggressiveClassifier,
+    Perceptron,
+    RidgeClassifier, RidgeClassifierCV,
+    SGDClassifier
+)
+from sklearn.svm import LinearSVC, OneClassSVM
+
+# Linear regressors
+from sklearn.linear_model import (
+    ARDRegression,
+    BayesianRidge,
+    ElasticNet, ElasticNetCV,
+    HuberRegressor,
+    Lars, LarsCV,
+    Lasso, LassoCV,
+    LassoLars, LassoLarsCV,
+    LassoLarsIC,
+    LinearRegression,
+    MultiTaskElasticNet, MultiTaskElasticNetCV,
+    MultiTaskLasso, MultiTaskLassoCV,
+    OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV,
+    PassiveAggressiveRegressor,
+    RANSACRegressor,
+    Ridge, RidgeCV,
+    SGDRegressor,
+    TheilSenRegressor
+)
+try:
+    from sklearn.linear_model import GammaRegressor
+except ImportError:
+    # available since sklearn>=1.1
+    GammaRegressor = None
+try:
+    from sklearn.linear_model import QuantileRegressor
+except ImportError:
+    # available since sklearn>=1.0
+    QuantileRegressor = None
+try:
+    from sklearn.linear_model import PoissonRegressor
+except ImportError:
+    # available since sklearn>=0.23
+    PoissonRegressor = None
+try:
+    from sklearn.linear_model import TweedieRegressor
+except ImportError:
+    # available since sklearn>=0.23
+    TweedieRegressor = None
+try:
+    from sklearn.linear_model import SGDOneClassSVM
+except ImportError:
+    # available since sklearn>=1.0
+    SGDOneClassSVM = None
+
+from sklearn.svm import LinearSVR
+from sklearn.discriminant_analysis import (
+    LinearDiscriminantAnalysis,
+    QuadraticDiscriminantAnalysis
+)
+
+# Mixture
+from sklearn.mixture import (
+    GaussianMixture, BayesianGaussianMixture
+)
+
+# Multi-class
+from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
+
+# Tree-based models
+from sklearn.ensemble import (
+    AdaBoostClassifier, AdaBoostRegressor,
+    BaggingClassifier, BaggingRegressor,
+    ExtraTreesClassifier, ExtraTreesRegressor,
+    GradientBoostingClassifier, GradientBoostingRegressor,
+    IsolationForest,
+    RandomForestClassifier, RandomForestRegressor, RandomTreesEmbedding,
+    VotingClassifier
+)
+try:
+    from sklearn.ensemble import VotingRegressor
+except ImportError:
+    # New in 0.21
+    VotingRegressor = None
+try:
+    from sklearn.ensemble import StackingClassifier, StackingRegressor
+except ImportError:
+    # New in 0.22
+    StackingClassifier = None
+    StackingRegressor = None
+from sklearn.tree import (
+    DecisionTreeClassifier, DecisionTreeRegressor,
+    ExtraTreeClassifier, ExtraTreeRegressor
+)
+
+# Gaussian processes
+from sklearn.gaussian_process import (
+    GaussianProcessClassifier, GaussianProcessRegressor
+)
+
+# GridSearchCV
+from sklearn.model_selection import GridSearchCV
+
+# MultiOutput
+from sklearn.multioutput import MultiOutputClassifier, MultiOutputRegressor
+
+# Support vector machines
+from sklearn.svm import NuSVC, NuSVR, SVC, SVR
+
+# K-nearest neighbors
+from sklearn.neighbors import (
+    KNeighborsClassifier,
+    KNeighborsRegressor,
+    LocalOutlierFactor,
+    NearestNeighbors,
+    RadiusNeighborsClassifier,
+    RadiusNeighborsRegressor,
+)
+try:
+    from sklearn.neighbors import (
+        KNeighborsTransformer,
+        NeighborhoodComponentsAnalysis,
+    )
+except ImportError:
+    # New in 0.22
+    KNeighborsTransformer = None
+    NeighborhoodComponentsAnalysis = None
+
+# Naive Bayes
+from sklearn.naive_bayes import (
+    BernoulliNB,
+    GaussianNB,
+    MultinomialNB,
+)
+try:
+    from sklearn.naive_bayes import CategoricalNB
+except ImportError:
+    # scikit-learn versions <= 0.21
+    CategoricalNB = None
+try:
+    from sklearn.naive_bayes import ComplementNB
+except ImportError:
+    # scikit-learn versions <= 0.19
+    ComplementNB = None
+
+# Neural Networks
+from sklearn.neural_network import MLPClassifier, MLPRegressor
+
+# Clustering
+from sklearn.cluster import KMeans, MiniBatchKMeans
+
+# Operators for preprocessing and feature engineering
+from sklearn.cross_decomposition import PLSRegression
+from sklearn.decomposition import (
+    KernelPCA,
+    IncrementalPCA,
+    PCA,
+    TruncatedSVD,
+)
+from sklearn.feature_extraction import (
+    DictVectorizer,
+    FeatureHasher,
+)
+from sklearn.feature_extraction.text import (
+    CountVectorizer, TfidfTransformer, TfidfVectorizer
+)
+from sklearn.feature_selection import (
+    GenericUnivariateSelect, RFE, RFECV,
+    SelectFdr, SelectFpr, SelectFromModel,
+    SelectFwe, SelectKBest, SelectPercentile,
+    VarianceThreshold
+)
+try:
+    # 0.20
+    from sklearn.impute import SimpleImputer
+except ImportError:
+    # 0.19
+    from sklearn.preprocessing import Imputer as SimpleImputer
+from sklearn.preprocessing import Binarizer
+try:
+    from sklearn.preprocessing import Imputer
+except ImportError:
+    # removed in 0.21
+    Imputer = None
+try:
+    from sklearn.impute import KNNImputer
+except ImportError:
+    # New in 0.22
+    KNNImputer = None
+try:
+    from sklearn.preprocessing import KBinsDiscretizer
+except ImportError:
+    # not available in 0.19
+    KBinsDiscretizer = None
+from sklearn.preprocessing import (
+    LabelBinarizer, LabelEncoder,
+    Normalizer, OneHotEncoder
+)
+try:
+    from sklearn.preprocessing import OrdinalEncoder
+except ImportError:
+    # Not available in scikit-learn < 0.20.0
+    OrdinalEncoder = None
+from sklearn.preprocessing import (
+    FunctionTransformer,
+    KernelCenterer,
+    MaxAbsScaler,
+    MinMaxScaler,
+    PolynomialFeatures,
+    RobustScaler,
+    StandardScaler
+)
+
+try:
+    from sklearn.preprocessing import PowerTransformer
+except ImportError:
+    # Not available in scikit-learn < 0.20.0
+    PowerTransformer = None
+
+try:
+    from sklearn.ensemble import (
+        HistGradientBoostingClassifier,
+        HistGradientBoostingRegressor
+    )
+except ImportError:
+    # Second verification as these models still require
+    # manual activation.
+    try:
+        from sklearn.ensemble._hist_gradient_boosting.gradient_boosting import (  # noqa
+            HistGradientBoostingClassifier,
+            HistGradientBoostingRegressor
+        )
+    except ImportError:
+        HistGradientBoostingRegressor = None
+        HistGradientBoostingClassifier = None
+
+from sklearn.random_projection import GaussianRandomProjection
+
+try:
+    from sklearn.compose import ColumnTransformer
+except ImportError:
+    # ColumnTransformer was introduced in 0.20.
+    ColumnTransformer = None
+
+from sklearn.pipeline import Pipeline, FeatureUnion
+
+# Custom extension
+from .sklapi import CastRegressor, CastTransformer, ReplaceTransformer
+
+from .common._registration import register_converter, register_shape_calculator
+
+logger = logging.getLogger('skl2onnx')
+
+# In most cases, a scikit-learn operator produces only one output.
+# However, each classifier basically has two outputs: the predicted
+# label and the probabilities of all possible labels. Here is a list
+# of supported scikit-learn classifiers. In the parsing stage, we
+# produce two outputs for objects included in the following list and
+# one output for everything not in the list (see the sketch after
+# the list below).
+sklearn_classifier_list = list(filter(lambda m: m is not None, [
+    AdaBoostClassifier,
+    BaggingClassifier,
+    BernoulliNB,
+    CategoricalNB,
+    CalibratedClassifierCV,
+    ComplementNB,
+    DecisionTreeClassifier,
+    ExtraTreeClassifier,
+    ExtraTreesClassifier,
+    GaussianNB,
+    GaussianProcessClassifier,
+    GradientBoostingClassifier,
+    HistGradientBoostingClassifier,
+    KNeighborsClassifier,
+    LinearDiscriminantAnalysis,
+    LinearSVC,
+    LogisticRegression,
+    LogisticRegressionCV,
+    MLPClassifier,
+    MultinomialNB,
+    NuSVC,
+    OneVsOneClassifier,
+    OneVsRestClassifier,
+    PassiveAggressiveClassifier,
+    Perceptron,
+    QuadraticDiscriminantAnalysis,
+    RandomForestClassifier,
+    SGDClassifier,
+    StackingClassifier,
+    SVC,
+    VotingClassifier,
+]))
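As referenced above, the two classifier outputs are visible once a model from this list is converted; an illustrative check with onnxruntime (the output names shown in the comment are the typical defaults):

    import numpy
    import onnxruntime
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier
    from skl2onnx import to_onnx

    X, y = load_iris(return_X_y=True)
    clf = DecisionTreeClassifier(max_depth=3).fit(X, y)
    onx = to_onnx(clf, X[:1].astype(numpy.float32))
    sess = onnxruntime.InferenceSession(
        onx.SerializeToString(), providers=["CPUExecutionProvider"])
    print([o.name for o in sess.get_outputs()])
    # typically ['output_label', 'output_probability']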
+
+# Clustering algorithms: produces two outputs, label and score for
+# each cluster in most cases.
+cluster_list = [KMeans, MiniBatchKMeans]
+
+# Outlier detection algorithms:
+# produces two outputs, label and scores
+outlier_list = [IsolationForest, LocalOutlierFactor, OneClassSVM]
+
+
+# Associate scikit-learn types with our operator names. If two
+# scikit-learn models share a single name, it means they are
+# equivalent in terms of conversion.
+def build_sklearn_operator_name_map():
+    res = {k: "Sklearn" + k.__name__ for k in [
+        AdaBoostClassifier,
+        AdaBoostRegressor,
+        BaggingClassifier,
+        BaggingRegressor,
+        BayesianGaussianMixture,
+        BayesianRidge,
+        BernoulliNB,
+        Binarizer,
+        CalibratedClassifierCV,
+        CategoricalNB,
+        CastRegressor,
+        CastTransformer,
+        ColumnTransformer,
+        ComplementNB,
+        CountVectorizer,
+        DictVectorizer,
+        DecisionTreeClassifier,
+        DecisionTreeRegressor,
+        ExtraTreeClassifier,
+        ExtraTreeRegressor,
+        ExtraTreesClassifier,
+        ExtraTreesRegressor,
+        FeatureHasher,
+        FeatureUnion,
+        FunctionTransformer,
+        GammaRegressor,
+        GaussianNB,
+        GaussianMixture,
+        GaussianProcessClassifier,
+        GaussianProcessRegressor,
+        GaussianRandomProjection,
+        GenericUnivariateSelect,
+        GradientBoostingClassifier,
+        GradientBoostingRegressor,
+        HistGradientBoostingClassifier,
+        HistGradientBoostingRegressor,
+        Imputer,
+        IncrementalPCA,
+        IsolationForest,
+        KMeans,
+        LabelBinarizer,
+        LabelEncoder,
+        LinearRegression,
+        LinearSVC,
+        LinearSVR,
+        LocalOutlierFactor,
+        MaxAbsScaler,
+        MiniBatchKMeans,
+        MinMaxScaler,
+        MLPClassifier,
+        MLPRegressor,
+        MultinomialNB,
+        MultiOutputClassifier,
+        MultiOutputRegressor,
+        KBinsDiscretizer,
+        KernelCenterer,
+        KernelPCA,
+        KNeighborsClassifier,
+        KNeighborsRegressor,
+        KNeighborsTransformer,
+        KNNImputer,
+        NearestNeighbors,
+        NeighborhoodComponentsAnalysis,
+        Normalizer,
+        OneClassSVM,
+        OneHotEncoder,
+        OneVsOneClassifier,
+        OneVsRestClassifier,
+        OrdinalEncoder,
+        PCA,
+        PLSRegression,
+        Pipeline,
+        PoissonRegressor,
+        PolynomialFeatures,
+        PowerTransformer,
+        QuadraticDiscriminantAnalysis,
+        RadiusNeighborsClassifier,
+        RadiusNeighborsRegressor,
+        RandomForestClassifier,
+        RandomForestRegressor,
+        RandomTreesEmbedding,
+        RANSACRegressor,
+        ReplaceTransformer,
+        RFE,
+        RFECV,
+        RobustScaler,
+        SelectFdr,
+        SelectFpr,
+        SelectFromModel,
+        SelectFwe,
+        SelectKBest,
+        SelectPercentile,
+        SGDClassifier,
+        SGDOneClassSVM,
+        SimpleImputer,
+        StackingClassifier,
+        StackingRegressor,
+        SVC,
+        SVR,
+        TfidfVectorizer,
+        TfidfTransformer,
+        TruncatedSVD,
+        TweedieRegressor,
+        VarianceThreshold,
+        VotingClassifier,
+        VotingRegressor,
+    ] if k is not None}
+    res.update({
+        ARDRegression: 'SklearnLinearRegressor',
+        ElasticNet: 'SklearnLinearRegressor',
+        ElasticNetCV: 'SklearnLinearRegressor',
+        GridSearchCV: 'SklearnGridSearchCV',
+        HuberRegressor: 'SklearnLinearRegressor',
+        LinearRegression: 'SklearnLinearRegressor',
+        Lars: 'SklearnLinearRegressor',
+        LarsCV: 'SklearnLinearRegressor',
+        Lasso: 'SklearnLinearRegressor',
+        LassoCV: 'SklearnLinearRegressor',
+        LassoLars: 'SklearnLinearRegressor',
+        LassoLarsCV: 'SklearnLinearRegressor',
+        LassoLarsIC: 'SklearnLinearRegressor',
+        LinearDiscriminantAnalysis: 'SklearnLinearClassifier',
+        LogisticRegression: 'SklearnLinearClassifier',
+        LogisticRegressionCV: 'SklearnLinearClassifier',
+        MultiTaskElasticNet: 'SklearnLinearRegressor',
+        MultiTaskElasticNetCV: 'SklearnLinearRegressor',
+        MultiTaskLasso: 'SklearnLinearRegressor',
+        MultiTaskLassoCV: 'SklearnLinearRegressor',
+        NuSVC: 'SklearnSVC',
+        NuSVR: 'SklearnSVR',
+        OrthogonalMatchingPursuit: 'SklearnLinearRegressor',
+        OrthogonalMatchingPursuitCV: 'SklearnLinearRegressor',
+        PassiveAggressiveClassifier: 'SklearnSGDClassifier',
+        PassiveAggressiveRegressor: 'SklearnLinearRegressor',
+        Perceptron: 'SklearnSGDClassifier',
+        QuantileRegressor: 'SklearnLinearRegressor',
+        Ridge: 'SklearnLinearRegressor',
+        RidgeCV: 'SklearnLinearRegressor',
+        RidgeClassifier: 'SklearnLinearClassifier',
+        RidgeClassifierCV: 'SklearnLinearClassifier',
+        SGDRegressor: 'SklearnLinearRegressor',
+        StandardScaler: 'SklearnScaler',
+        TheilSenRegressor: 'SklearnLinearRegressor',
+    })
+    if None in res:
+        del res[None]
+    return res
+
+
+
[docs]def update_registered_converter(model, alias, shape_fct, convert_fct,
+                                overwrite=True, parser=None, options=None):
+    """
+    Registers or updates a converter for a new model so that
+    it can be converted when inserted in a *scikit-learn* pipeline.
+
+    :param model: model class
+    :param alias: alias used to register the model
+    :param shape_fct: function which checks or modifies the expected
+        outputs, this function should be fast so that the whole graph
+        can be computed followed by the conversion of each model,
+        parallelized or not
+    :param convert_fct: function which converts a model
+    :param overwrite: False to raise exception if a converter
+        already exists
+    :param parser: overwrites the parser as well if not empty
+    :param options: registered options for this converter
+
+    The alias is usually the library name followed by the model name.
+    Example:
+
+    ::
+
+        from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes
+        from skl2onnx.operator_converters.RandomForest import convert_sklearn_random_forest_classifier
+        from skl2onnx import update_registered_converter
+        update_registered_converter(
+                SGDClassifier, 'SklearnLinearClassifier',
+                calculate_linear_classifier_output_shapes,
+                convert_sklearn_random_forest_classifier,
+                options={'zipmap': [True, False, 'columns'],
+                         'output_class_labels': [False, True],
+                         'raw_scores': [True, False]})
+
+    The function does not update the parser if not specified except if
+    option `'zipmap'` is added to the list. Every classifier
+    must declare this option to let the default parser
+    automatically handle that option.
+    """ # noqa
+    if (not overwrite and model in sklearn_operator_name_map
+            and alias != sklearn_operator_name_map[model]):
+        warnings.warn("Model '{0}' was already registered under alias "
+                      "'{1}'.".format(model, sklearn_operator_name_map[model]))
+    sklearn_operator_name_map[model] = alias
+    register_converter(alias, convert_fct, overwrite=overwrite,
+                       options=options)
+    register_shape_calculator(alias, shape_fct, overwrite=overwrite)
+    if parser is not None:
+        from ._parse import update_registered_parser
+        update_registered_parser(model, parser)
+    elif (options is not None and
+            ('zipmap' in options or 'output_class_labels' in options)):
+        from ._parse import (
+            _parse_sklearn_classifier, update_registered_parser)
+        update_registered_parser(model, _parse_sklearn_classifier)
+
+
+def _get_sklearn_operator_name(model_type):
+    """
+    Get operator name of the input argument
+
+    :param model_type: A scikit-learn object (e.g., SGDClassifier
+        and Binarizer)
+    :return: A string which stands for the type of the input model in
+        our conversion framework
+    """
+    if model_type not in sklearn_operator_name_map:
+        # No proper operator name found, it means a local operator.
+        alias = None
+    else:
+        alias = sklearn_operator_name_map[model_type]
+    logger.debug('[parsing] found alias=%r for type=%r.', alias, model_type)
+    return alias
+
+
+def get_model_alias(model_type):
+    """
+    Get alias model. Raise an exception if not found.
+
+    :param model_type: A scikit-learn object (e.g., SGDClassifier
+        and Binarizer)
+    :return: A string which stands for the type of the input model in
+        our conversion framework
+    """
+    res = _get_sklearn_operator_name(model_type)
+    if res is None:
+        raise RuntimeError("Unable to find alias for model '{}'. "
+                           "The converter is likely missing."
+                           "".format(model_type))
+    return res
+
+
+# registered converters
+sklearn_operator_name_map = build_sklearn_operator_name_map()
+
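A short sketch querying the alias map directly; the expected alias follows from the mapping built above:

    from sklearn.linear_model import LogisticRegression
    from skl2onnx._supported_operators import get_model_alias

    print(get_model_alias(LogisticRegression))  # 'SklearnLinearClassifier'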
\ No newline at end of file
diff --git a/_modules/skl2onnx/algebra/onnx_operator_mixin.html b/_modules/skl2onnx/algebra/onnx_operator_mixin.html
new file mode 100644
index 000000000..a17aa368a
--- /dev/null
+++ b/_modules/skl2onnx/algebra/onnx_operator_mixin.html
@@ -0,0 +1,529 @@
+  skl2onnx.algebra.onnx_operator_mixin - sklearn-onnx 1.14.0 documentation

Source code for skl2onnx.algebra.onnx_operator_mixin

+# SPDX-License-Identifier: Apache-2.0
+
+import warnings
+from sklearn.base import BaseEstimator
+from onnx import shape_inference
+from ..common._topology import Scope, Operator
+from ..common._container import ModelComponentContainer
+from ..common._registration import get_converter, get_shape_calculator
+from ..common._topology import Variable
+from .._supported_operators import sklearn_operator_name_map
+from .onnx_operator import OnnxOperator
+from .type_helper import guess_initial_types
+
+
+class OnnxOperatorMixin:
+    """
+    Base class for *scikit-learn* operators
+    sharing an API to convert object to *ONNX*.
+    """
+
+    def to_onnx(self, X=None, name=None,
+                options=None, white_op=None, black_op=None,
+                final_types=None, target_opset=None, verbose=0):
+        """
+        Converts the model into *ONNX* format.
+        It relies on :func:`convert_sklearn` and the
+        overloads provided by this mixin.
+
+        :param X: training data, at least one sample,
+            it is used to guess the type of the input data.
+        :param name: name of the model, if None,
+            it is replaced by the class name.
+        :param options: specific options given to converters
+            (see :ref:`l-conv-options`)
+        :param white_op: white list of ONNX nodes allowed
+            while converting a pipeline, if empty, all are allowed
+        :param black_op: black list of ONNX nodes disallowed
+            while converting a pipeline, if empty, none are blacklisted
+        :param final_types: a python list. Works the same way as initial_types
+            but is not mandatory, it is used to overwrite the type
+            (if type is not None) and the name of every output.
+        :param target_opset: to overwrite `self.op_version`
+        :param verbose: displays information while converting
+        """
+        from .. import convert_sklearn
+        if X is None:
+            initial_types = self.infer_initial_types()
+        else:
+            initial_types = guess_initial_types(X, None)
+        if not hasattr(self, 'op_version'):
+            if name is None:
+                name = self.__class__.__name__
+            raise AttributeError(
+                "Attribute 'op_version' is missing for '{}' "
+                "(model: '{}').".format(
+                    self.__class__.__name__, name))
+        return convert_sklearn(
+            self, initial_types=initial_types,
+            target_opset=target_opset or self.op_version, options=options,
+            white_op=white_op, black_op=black_op, final_types=final_types,
+            verbose=verbose)
+
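An illustrative, hedged sketch of the contract `to_onnx` relies on: the estimator exposes `op_version` and overloads `to_onnx_operator`. The class below is hypothetical (not part of this module), and the gallery examples show the fully registered pattern:

    from sklearn.base import BaseEstimator, TransformerMixin
    from skl2onnx.algebra.onnx_operator_mixin import OnnxOperatorMixin
    from skl2onnx.algebra.onnx_ops import OnnxIdentity

    class IdentityTransformer(TransformerMixin, BaseEstimator,
                              OnnxOperatorMixin):
        op_version = 15  # assumption: a recent enough target opset

        def fit(self, X, y=None):
            return self

        def transform(self, X):
            return X

        def to_onnx_operator(self, inputs=None, outputs=None,
                             target_opset=None, options=None):
            # forwards the single input unchanged
            return OnnxIdentity(inputs[0], output_names=outputs,
                                op_version=target_opset or self.op_version)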
+    def infer_initial_types(self):
+        """
+        Infers initial types.
+        """
+        if hasattr(self, 'enumerate_initial_types'):
+            return list(self.enumerate_initial_types())
+        raise RuntimeError("Method enumerate_initial_types is missing "
+                           "and initial_types are not defined.")
+
+    def _find_sklearn_parent(self):
+        for cl in self.__class__.__bases__:
+            if issubclass(cl, BaseEstimator):
+                return cl
+        raise RuntimeError("Unable to find any parent inherited from "
+                           "BaseEstimator: {}.".format(
+                               ", ".join(map(str, self.__class__.__bases__))))
+
+    def to_onnx_operator(self, inputs=None, outputs=None,
+                         target_opset=None, options=None):
+        """
+        This function must be overloaded.
+        """
+        raise NotImplementedError()
+
+    def onnx_parser(self):
+        """
+        Returns a parser for this model.
+        If not overloaded, it calls the converter to guess the number
+        of outputs. If it still fails, it fetches the parser
+        mapped to the first *scikit-learn* parent
+        it can find.
+        """
+        def parser(scope=None, inputs=None):
+            try:
+                op = self.to_onnx_operator(inputs=inputs, outputs=None)
+            except NotImplementedError:
+                self._find_sklearn_parent()
+                return None
+
+            names = []
+            while True:
+                try:
+                    name = op.get_output_name(len(names), scope=scope)
+                    if name is None:
+                        break
+                    names.append(name)
+                except AttributeError:
+                    return None
+                except IndexError:
+                    break
+            return names
+        return parser
+
+    def get_inputs(self, inputs, i):
+        if i >= len(inputs):
+            return OnnxOperator.OnnxOperatorVariable(i)
+        else:
+            input = inputs[i]
+            if isinstance(input, (str, OnnxOperator.UnscopedVariable)):
+                return OnnxOperator.OnnxOperatorVariable(i, input)
+            else:
+                return input
+
+    def onnx_shape_calculator(self):
+        """
+        Returns a shape calculator for this model.
+        If not overloaded, it fetches the parser
+        mapped to the first *scikit-learn* parent
+        it can find.
+        """
+        if not hasattr(self, 'op_version'):
+            raise AttributeError(
+                "Class '{}' should have an attribute 'op_version'.".format(
+                    self.__class__.__name__))
+
+        try:
+            op = self.to_onnx_operator()
+        except NotImplementedError:
+            parent = self._find_sklearn_parent()
+            name = sklearn_operator_name_map.get(
+                parent, "Sklearn" + parent.__name__)
+            return get_shape_calculator(name)
+
+        def shape_calculator(operator):
+            onx = op.to_onnx(operator.inputs, operator.outputs,
+                             target_opset=self.op_version)
+            inferred_model = shape_inference.infer_shapes(onx)
+            shapes = Variable.from_pb(inferred_model.graph.value_info)
+            shapes = {shape.onnx_name: shape for shape in shapes}
+            for o in operator.outputs:
+                name = o.onnx_name
+                if name not in shapes:
+                    raise RuntimeError("Shape of output '{}' cannot be "
+                                       "infered. onnx_shape_calculator "
+                                       "must be overriden and return "
+                                       "a shape calculator.".format(name))
+                o.set_type(shapes[name].type)
+
+        return shape_calculator
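A hypothetical override, shown only as a sketch: a subclass of this mixin can bypass onnx shape inference by declaring its output type directly (the class name is illustrative).

from skl2onnx.common.data_types import FloatTensorType

class MyMixinEstimator:  # stands in for a subclass of this mixin
    def onnx_shape_calculator(self):
        def shape_calculator(operator):
            # Declare a fixed output type instead of relying on
            # shape_inference.infer_shapes as the default path above does.
            operator.outputs[0].type = FloatTensorType([None, 1])
        return shape_calculator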
+
+    def onnx_converter(self):
+        """
+        Returns a converter for this model.
+        If not overloaded, it fetches the converter
+        mapped to the first *scikit-learn* parent
+        it can find.
+        """
+        def converter(scope: Scope, operator: Operator,
+                      container: ModelComponentContainer):
+            inputs = operator.inputs  # getattr(self, "parsed_inputs_", None)
+            outputs = operator.outputs  # kwargs.get('outputs', None)
+            op_version = container.target_opset
+            options = scope.get_options(operator.raw_operator, fail=False)
+            try:
+                if inputs:
+                    op = self.to_onnx_operator(
+                        inputs=inputs, outputs=outputs,
+                        target_opset=op_version, options=options)
+                else:
+                    op = self.to_onnx_operator(
+                        target_opset=op_version,
+                        outputs=outputs, options=options)
+            except TypeError:
+                warnings.warn(
+                    "Signature should be to_onnx_operator(self, inputs=None, "
+                    "outputs=None, target_opset=None, **kwargs). "
+                    "This will be the case in version 1.11, class=%r."
+                    "" % type(self),
+                    DeprecationWarning)
+                try:
+                    if inputs:
+                        op = self.to_onnx_operator(
+                            inputs=inputs, outputs=outputs)
+                    else:
+                        op = self.to_onnx_operator()
+                except NotImplementedError:
+                    parent = self._find_sklearn_parent()
+                    name = sklearn_operator_name_map[parent]
+                    conv = get_converter(name)
+                    return conv(scope, operator, container)
+            except NotImplementedError:
+                parent = self._find_sklearn_parent()
+                name = sklearn_operator_name_map[parent]
+                conv = get_converter(name)
+                return conv(scope, operator, container)
+
+            op.add_to(scope, container, operator=operator)
+
+        return converter
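A sketch of the method the default converter expects, assuming the subclass simply forwards its first input (the class name is illustrative and OnnxIdentity is used only as the simplest possible operator).

from skl2onnx.algebra.onnx_ops import OnnxIdentity

class MyPassThrough:  # stands in for a subclass of this mixin
    def to_onnx_operator(self, inputs=None, outputs=None,
                         target_opset=None, **kwargs):
        # The converter above calls add_to() on the returned operator.
        return OnnxIdentity(inputs[0], op_version=target_opset,
                            output_names=outputs)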
+
+ \ No newline at end of file diff --git a/_modules/skl2onnx/algebra/onnx_ops.html b/_modules/skl2onnx/algebra/onnx_ops.html new file mode 100644 index 000000000..40710668b --- /dev/null +++ b/_modules/skl2onnx/algebra/onnx_ops.html @@ -0,0 +1,792 @@ + skl2onnx.algebra.onnx_ops - sklearn-onnx 1.14.0 documentation

Source code for skl2onnx.algebra.onnx_ops

+# SPDX-License-Identifier: Apache-2.0
+
+"""
+Placeholder for all ONNX operators.
+"""
+import sys
+import os
+import numpy as np
+try:
+    from scipy.sparse import coo_matrix
+except ImportError:
+    from scipy.sparse.coo import coo_matrix
+import onnx
+from ..common.data_types import DataType
+from ..common._topology import Variable
+from .automation import get_rst_doc
+from ._cache import cache_folder
+
+
+def ClassFactory(class_name, op_name, inputs, outputs,
+                 input_range, output_range,
+                 domain, attr_names, doc,
+                 deprecated, since_version,
+                 past_version):
+    from .onnx_operator import OnnxOperator, OnnxOperatorItem
+
+    def __init__(self, *args, **kwargs):
+
+        op_version = kwargs.pop('op_version', None)
+        if isinstance(op_version, dict):
+            op_version = op_version.get(domain, None)
+
+        if op_version is None:
+            if len(args) == 0 and input_range[0] == input_range[1]:
+                args = [_[0] for _ in self.__class__.expected_inputs]
+            if not (input_range[0] <= len(args) <= input_range[1]):
+                raise RuntimeError("Unexpected number of inputs, "
+                                   "got {}, expecting {} for operator "
+                                   "'{}'.".format(
+                                       len(args), len(inputs), op_name))
+
+        attr_names = self.attr_names
+        if '_' in self.__class__.__name__:
+            op_version_class = int(self.__class__.__name__.split('_')[-1])
+            if op_version is None:
+                op_version = op_version_class
+            try:
+                op_version = min(op_version, op_version_class)
+            except TypeError:
+                raise TypeError(
+                    "Could not compare versions {} ? {} for "
+                    "class '{}' since_version {}. Parameter 'op_version' "
+                    "is probably missing when the class "
+                    "is instantiated.".format(
+                        op_version, op_version_class, class_name,
+                        since_version))
+        else:
+            op_version_class = None
+
+        # By default, the op_version is None.
+        # None means the latest available.
+        if op_version is None:
+            op_version = since_version
+
+        found = None
+        if op_version is not None:
+            # attr_names refers to the most recent version of
+            # this operator. We may need an older one.
+            for op in range(op_version, 0, -1):
+                name = '{}_{}'.format(self.__class__.__name__, op)
+                if name in self.past_version:
+                    found = (name, op)
+                    attr_names = self.past_version[name].attr_names
+                    break
+        if (op_version_class is not None and found is not None and
+                found[-1] != op_version_class):
+            raise RuntimeError(
+                "op_version={} does not refer to the same opset as the class "
+                "name ('{}').".format(op_version, self.__class__.__name__))
+        for key in kwargs:
+            if key in {'output_names', 'op_version', 'domain', 'ir_version',
+                       'global_context', 'clear_subgraph_inputs'}:
+                continue
+            if key not in attr_names:
+                raise TypeError("Argument '%s' not valid for '%s' opset=%s."
+                                % (key, op_name, op_version))
+
+        if op_version is not None:
+            kwargs['op_version'] = op_version
+        # This class can only be created by a user. Let's check
+        # types are either a variable, an operator or an array.
+        for i, a in enumerate(args):
+            if isinstance(a, tuple):
+                if len(a) != 2:
+                    raise TypeError(
+                        "Input %r is a tuple or class %r, it must have two "
+                        "elements (name, type) not %r." % (i, class_name, a))
+                if (not isinstance(a[0], str) or
+                        not isinstance(a[1], DataType)):
+                    raise TypeError(
+                        "Input %r is a tuple or class %r, it must be a tuple "
+                        "(name, type) not %r." % (i, class_name, a))
+                continue
+            if not isinstance(a, (
+                    Variable, OnnxOperator, np.ndarray, str,
+                    OnnxOperatorItem, coo_matrix)):
+                raise TypeError(
+                    "Unexpected type %r for input %r of operator %r. "
+                    "It must be an instance of Variable (or a string), "
+                    "OnnxOperator, OnnxOperatorItem, numpy.ndarray, "
+                    "coo_matrix)." % (
+                        type(a), i, class_name))
+        OnnxOperator.__init__(self, *args, **kwargs)
+
+    newclass = type(class_name, (OnnxOperator,),
+                    {"__init__": __init__, '__doc__': doc,
+                     'expected_inputs': inputs,
+                     'expected_outputs': outputs,
+                     'operator_name': op_name,
+                     'input_range': input_range,
+                     'output_range': output_range,
+                     'domain': domain,
+                     'is_deprecated': deprecated,
+                     'since_version': since_version,
+                     'past_version': past_version,
+                     'attr_names': attr_names,
+                     '__module__': __name__})
+    return newclass
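For instance, every generated class keeps the schema metadata attached by ClassFactory; a small sketch assuming the module has been imported:

from skl2onnx.algebra.onnx_ops import OnnxAdd

print(OnnxAdd.operator_name, OnnxAdd.domain, OnnxAdd.since_version)
print(sorted(OnnxAdd.past_version))  # e.g. ['OnnxAdd_1', 'OnnxAdd_13', ...]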
+
+
+def dynamic_class_creation(cache=False):
+    """
+    Automatically generates classes for each of the operators
+    the *onnx* module defines, as described at
+    `Operators
+    <https://github.com/onnx/onnx/blob/master/docs/Operators.md>`_
+    and `Operators-ml
+    <https://github.com/onnx/onnx/blob/master/docs/
+    Operators-ml.md>`_.
+    """
+    cache_dir = cache_folder()
+    res = {}
+    for schema in onnx.defs.get_all_schemas_with_history():
+        if schema.support_level == schema.SupportType.EXPERIMENTAL:
+            # Skips experimental operators.
+            continue
+        # Multiple versions can coexist. The most recent one is kept.
+        if schema.name in res:
+            if schema.since_version > res[schema.name].since_version:
+                # We keep the most recent one.
+                res[schema.name] = schema
+        else:
+            res[schema.name] = schema
+        res[schema.name + '_' + str(schema.since_version)] = schema
+    cls = {}
+
+    def _c(obj, label, i):
+        name = '%s%d' % (obj.name or label, i)
+        tys = obj.typeStr or ''
+        return (name, tys)
+
+    for name in sorted(res):
+        schema = res[name]
+        inputs = [_c(o, 'I', i) for i, o in enumerate(schema.inputs)]
+        outputs = [_c(o, 'O', i) for i, o in enumerate(schema.outputs)]
+        args = [p for p in schema.attributes]
+
+        if '_' in name:
+            class_name = "Onnx" + name
+        else:
+            class_name = "Onnx" + schema.name
+
+        filename = os.path.join(
+            cache_dir,
+            schema.name + '_' + str(schema.since_version) + ".rst")
+        if not cache and os.path.exists(filename):
+            with open(filename, "r", encoding="utf-8") as f:
+                doc = f.read()
+        else:
+            doc = get_rst_doc(schema)
+            if cache:
+                with open(filename, 'w', encoding='utf-8') as f:
+                    f.write(doc)
+
+        cl = ClassFactory(class_name, schema.name, inputs, outputs,
+                          [schema.min_input, schema.max_input],
+                          [schema.min_output, schema.max_output],
+                          schema.domain, args,
+                          "**Version**" + doc.split('**Version**')[-1],
+                          getattr(schema, 'deprecated', False),
+                          schema.since_version, {})
+        cls[class_name] = cl
+
+    # Retrieves past classes.
+    for name in cls:
+        if '_' not in name:
+            continue
+        main, version = name.split('_')
+        last = cls[main]
+        last.past_version[name] = cls[name]
+
+    return cls
+
+
+def _update_module():
+    """
+    Dynamically updates the module with operators defined
+    by *ONNX*.
+    """
+    res = dynamic_class_creation()
+    this = sys.modules[__name__]
+    for k, v in res.items():
+        setattr(this, k, v)
+
+
+_update_module()
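Once _update_module() has run at import time, the generated classes can be composed directly. A hedged usage sketch (opset number and names are illustrative):

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxAdd, OnnxMul

op = OnnxMul(
    OnnxAdd('X', np.array([1.0], dtype=np.float32), op_version=14),
    np.array([2.0], dtype=np.float32),
    op_version=14, output_names=['Y'])
model = op.to_onnx({'X': np.zeros((1, 2), dtype=np.float32)})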
+
+
+def OnnxReduceSumApi11(*x, axes=None, keepdims=1, op_version=None,
+                       output_names=None):
+    """
+    Adds operator ReduceSum with opset>=13 following API from opset 12.
+    """
+    if op_version is None:
+        raise RuntimeError("op_version must be specified.")
+    if op_version is None or op_version >= 13:
+        if axes is None:
+            return OnnxReduceSum(  # noqa
+                *x, keepdims=keepdims, op_version=op_version,
+                output_names=output_names)
+        return OnnxReduceSum(  # noqa
+            *x, np.array(axes, dtype=np.int64),
+            keepdims=keepdims, op_version=op_version,
+            output_names=output_names)
+    if op_version >= 11:
+        if axes is None:
+            return OnnxReduceSum_11(  # noqa
+                *x, keepdims=keepdims,
+                op_version=op_version, output_names=output_names)
+        return OnnxReduceSum_11(  # noqa
+            *x, axes=axes, keepdims=keepdims,
+            op_version=op_version, output_names=output_names)
+    if axes is None:
+        return OnnxReduceSum_1(*x, keepdims=keepdims,  # noqa
+                               op_version=op_version,
+                               output_names=output_names)
+    return OnnxReduceSum_1(*x, axes=axes, keepdims=keepdims,  # noqa
+                           op_version=op_version, output_names=output_names)
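A usage sketch of the helper: with opset 13 or later it passes `axes` as a second input tensor, below that as an attribute (values are illustrative).

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxReduceSumApi11

node = OnnxReduceSumApi11('X', axes=[1], keepdims=0,
                          op_version=13, output_names=['Y'])
onx = node.to_onnx({'X': np.zeros((2, 3), dtype=np.float32)})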
+
+
+def OnnxReduceAnyApi18(cl18, cl13, cl11, cl1, *x, axes=None, keepdims=1,
+                       op_version=None, output_names=None):
+    """
+    Adds operator Reduce* with opset>=18 following API from opset 17.
+    """
+    if op_version is None:
+        raise RuntimeError("op_version must be specified.")
+    if op_version is None or op_version >= 18:
+        if axes is None:
+            return cl18(  # noqa
+                *x, keepdims=keepdims, op_version=op_version,
+                output_names=output_names)
+        return cl18(  # noqa
+            *x, np.array(axes, dtype=np.int64),
+            keepdims=keepdims, op_version=op_version,
+            output_names=output_names)
+    if op_version >= 13:
+        if axes is None:
+            return cl13(*x, keepdims=keepdims,  # noqa
+                        op_version=op_version,
+                        output_names=output_names)
+        return cl13(*x, axes=axes, keepdims=keepdims,  # noqa
+                    op_version=op_version, output_names=output_names)
+    if op_version >= 11:
+        if axes is None:
+            return cl11(*x, keepdims=keepdims,  # noqa
+                        op_version=op_version,
+                        output_names=output_names)
+        return cl11(*x, axes=axes, keepdims=keepdims,  # noqa
+                    op_version=op_version, output_names=output_names)
+    if axes is None:
+        return cl1(*x, keepdims=keepdims,  # noqa
+                   op_version=op_version,
+                   output_names=output_names)
+    return cl1(*x, axes=axes, keepdims=keepdims,  # noqa
+               op_version=op_version, output_names=output_names)
+
+
+def OnnxReduceSumSquareApi18(*x, axes=None, keepdims=1, op_version=None,
+                             output_names=None):
+    """
+    Adds operator ReduceSumSquare with opset>=18 following API from opset 17.
+    """
+    if axes is None or not isinstance(axes, (list, np.ndarray)):
+        raise TypeError(f"axes must be a list or an array not {type(axes)}.")
+    return OnnxReduceAnyApi18(
+        OnnxReduceSumSquare, OnnxReduceSumSquare_13,  # noqa
+        OnnxReduceSumSquare_11, OnnxReduceSumSquare_1,  # noqa
+        *x, axes=axes, keepdims=keepdims, op_version=op_version,
+        output_names=output_names)
+
+
+def OnnxReduceMeanApi18(*x, axes=None, keepdims=1, op_version=None,
+                        output_names=None):
+    """
+    Adds operator ReduceMean with opset>=18 following API from opset 17.
+    """
+    return OnnxReduceAnyApi18(
+        OnnxReduceMean, OnnxReduceMean_13,  # noqa
+        OnnxReduceMean_11, OnnxReduceMean_1,  # noqa
+        *x, axes=axes, keepdims=keepdims, op_version=op_version,
+        output_names=output_names)
+
+
+def OnnxReduceMaxApi18(*x, axes=None, keepdims=1, op_version=None,
+                       output_names=None):
+    """
+    Adds operator ReduceMax with opset>=18 following API from opset 17.
+    """
+    return OnnxReduceAnyApi18(
+        OnnxReduceMax, OnnxReduceMax_13,  # noqa
+        OnnxReduceMax_11, OnnxReduceMax_1,  # noqa
+        *x, axes=axes, keepdims=keepdims, op_version=op_version,
+        output_names=output_names)
+
+
+def OnnxReduceLogSumExpApi18(*x, axes=None, keepdims=1, op_version=None,
+                             output_names=None):
+    """
+    Adds operator ReduceLogSumExp with opset>=18 following API from opset 17.
+    """
+    return OnnxReduceAnyApi18(
+        OnnxReduceLogSumExp, OnnxReduceLogSumExp_13,  # noqa
+        OnnxReduceLogSumExp_11, OnnxReduceLogSumExp_1,  # noqa
+        *x, axes=axes, keepdims=keepdims, op_version=op_version,
+        output_names=output_names)
+
+
+def OnnxReduceL2Api18(*x, axes=None, keepdims=1, op_version=None,
+                      output_names=None):
+    """
+    Adds operator ReduceL2 with opset>=18 following API from opset 17.
+    """
+    return OnnxReduceAnyApi18(
+        OnnxReduceL2, OnnxReduceL2_13,  # noqa
+        OnnxReduceL2_11, OnnxReduceL2_1,  # noqa
+        *x, axes=axes, keepdims=keepdims, op_version=op_version,
+        output_names=output_names)
+
+
+def OnnxSplitApi18(*x, axis=0, split=None, num_outputs=None,
+                   op_version=None, output_names=None):
+    """
+    Adds operator Split with opset>=18 following API from opset 11.
+    """
+    if op_version is None:
+        raise RuntimeError("op_version must be specified.")
+    if op_version is None or op_version >= 18:
+        if split is None:
+            if num_outputs is None:
+                if output_names is None:
+                    raise RuntimeError(
+                        "split or num_outputs or output_names "
+                        "must be specified since opset 18.")
+                num_outputs = len(output_names)
+            if num_outputs is None:
+                raise AttributeError(
+                    "num_outputs cannot be None for Split-18.")
+            return OnnxSplit_18(  # noqa
+                *x, axis=axis, op_version=op_version,
+                num_outputs=num_outputs, output_names=output_names)
+        if num_outputs is None:
+            return OnnxSplit_18(  # noqa
+                *x, np.array(split, dtype=np.int64), axis=axis,
+                op_version=op_version, output_names=output_names)
+        return OnnxSplit_18(  # noqa
+            *x, np.array(split, dtype=np.int64), axis=axis,
+            num_outputs=num_outputs, op_version=op_version,
+            output_names=output_names)
+    if op_version >= 13:
+        if split is None:
+            return OnnxSplit_13(  # noqa
+                *x, axis=axis, op_version=op_version,
+                output_names=output_names)
+        return OnnxSplit_13(  # noqa
+            *x, np.array(split, dtype=np.int64), axis=axis,
+            op_version=op_version, output_names=output_names)
+    if op_version >= 11:
+        if split is None:
+            return OnnxSplit_11(  # noqa
+                *x, axis=axis, op_version=op_version,
+                output_names=output_names)
+        return OnnxSplit_11(  # noqa
+            *x, split=split, axis=axis, op_version=op_version,
+            output_names=output_names)
+    if split is None:
+        return OnnxSplit_2(  # noqa
+            *x, axis=axis, op_version=op_version, output_names=output_names)
+    return OnnxSplit_2(*x, split=split, axis=axis,  # noqa
+                       op_version=op_version, output_names=output_names)
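A usage sketch: from opset 18 on the number of outputs must be known when `split` is not given; here it is deduced from `output_names` (values are illustrative).

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxSplitApi18

node = OnnxSplitApi18('X', axis=1, op_version=18,
                      output_names=['Y0', 'Y1'])
onx = node.to_onnx({'X': np.zeros((2, 4), dtype=np.float32)})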
+
+
+def OnnxSqueezeApi11(*x, axes=None, op_version=None,
+                     output_names=None):
+    """
+    Adds operator Squeeze with opset>=13 following API from opset 11.
+    """
+    if op_version is None:
+        raise RuntimeError("op_version must be specified.")
+    if op_version is None or op_version >= 13:
+        return OnnxSqueeze(  # noqa
+            *x, np.array(axes, dtype=np.int64),
+            op_version=op_version, output_names=output_names)
+    if op_version >= 11:
+        return OnnxSqueeze_11(  # noqa
+            *x, axes=axes, op_version=op_version,
+            output_names=output_names)
+    return OnnxSqueeze_1(*x, axes=axes,  # noqa
+                         op_version=op_version, output_names=output_names)
+
+
+def OnnxUnsqueezeApi11(*x, axes=None, op_version=None,
+                       output_names=None):
+    """
+    Adds operator Unsqueeze with opset>=13 following API from opset 11.
+    """
+    if op_version is None:
+        raise RuntimeError("op_version must be specified.")
+    if op_version is None or op_version >= 13:
+        return OnnxUnsqueeze(  # noqa
+            *x, np.array(axes, dtype=np.int64),
+            op_version=op_version, output_names=output_names)
+    if op_version >= 11:
+        return OnnxUnsqueeze_11(  # noqa
+            *x, axes=axes, op_version=op_version,
+            output_names=output_names)
+    return OnnxUnsqueeze_1(*x, axes=axes,  # noqa
+                           op_version=op_version, output_names=output_names)
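A usage sketch covering both helpers: since opset 13, `axes` becomes an input tensor, which the helpers build from the Python list (values are illustrative).

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxSqueezeApi11, OnnxUnsqueezeApi11

sq = OnnxSqueezeApi11('X', axes=[0], op_version=13, output_names=['Y'])
un = OnnxUnsqueezeApi11('X', axes=[0], op_version=13, output_names=['Z'])
onx = sq.to_onnx({'X': np.zeros((1, 3), dtype=np.float32)})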
+
+
+def OnnxReduceL2_typed(dtype, x, axes=None, keepdims=1, op_version=None,
+                       output_names=None):
+    """
+    Adds operator ReduceL2 for float or double.
+    """
+    if dtype == np.float32:
+        return OnnxReduceL2Api18(  # noqa
+            x, axes=axes, keepdims=keepdims,
+            op_version=op_version, output_names=output_names)
+    x2 = OnnxMul(x, x, op_version=op_version)  # noqa
+    red = OnnxReduceSumApi11(
+        x2, axes=[1], keepdims=1, op_version=op_version)
+    return OnnxSqrt(  # noqa
+        red, op_version=op_version, output_names=output_names)
+
+
+def OnnxReshapeApi13(*x, allowzero=0, op_version=None,
+                     output_names=None):
+    """
+    Adds operator Reshape with opset>=14 following API from opset 13.
+    """
+    if op_version is None:
+        raise RuntimeError("op_version must be specified.")
+    if op_version is None or op_version >= 14:
+        return OnnxReshape(  # noqa
+            *x, allowzero=allowzero,
+            op_version=op_version, output_names=output_names)
+    if op_version >= 13:
+        return OnnxReshape_13(  # noqa
+            *x, op_version=op_version,
+            output_names=output_names)
+    return OnnxReshape_5(  # noqa
+        *x, op_version=op_version,
+        output_names=output_names)
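A usage sketch: the target shape is always an input tensor and `allowzero` is only forwarded from opset 14 on (values are illustrative).

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxReshapeApi13

shape = np.array([-1, 1], dtype=np.int64)
node = OnnxReshapeApi13('X', shape, op_version=14, output_names=['Y'])
onx = node.to_onnx({'X': np.zeros((2, 3), dtype=np.float32)})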
+
+
+ \ No newline at end of file diff --git a/_modules/skl2onnx/algebra/sklearn_ops.html b/_modules/skl2onnx/algebra/sklearn_ops.html index f2f8916fe..fc023014f 100644 --- a/_modules/skl2onnx/algebra/sklearn_ops.html +++ b/_modules/skl2onnx/algebra/sklearn_ops.html @@ -1,367 +1,455 @@ - skl2onnx.algebra.sklearn_ops — sklearn-onnx 1.11.2 documentation

Source code for skl2onnx.algebra.sklearn_ops

-# SPDX-License-Identifier: Apache-2.0
-
-"""
-Place holder for all ONNX operators.
-"""
-import sys
-import textwrap
-from sklearn.pipeline import Pipeline, FeatureUnion
-try:
-    from sklearn.compose import ColumnTransformer
-except ImportError:
-    # ColumnTransformer was introduced in 0.20.
-    ColumnTransformer = None
-from .onnx_subgraph_operator_mixin import OnnxSubGraphOperatorMixin
-
-
-def ClassFactorySklearn(skl_obj, class_name, doc, conv, shape_calc, alias):
-    from .onnx_subgraph_operator_mixin import OnnxSubGraphOperatorMixin
-
-    newclass = type(class_name, (OnnxSubGraphOperatorMixin, skl_obj),
-                    {'__doc__': doc,
-                     'operator_name': skl_obj.__name__,
-                     '_fct_converter': conv,
-                     '_fct_shape_calc': shape_calc,
-                     'input_range': [1, 1e9],
-                     'output_range': [1, 1e9],
-                     'op_version': None,
-                     'alias': alias,
-                     '__module__': __name__})
-    return newclass
-
-
-def dynamic_class_creation_sklearn():
-    """
-    Automatically generates classes for each of the converter.
-    """
-    from ..common._registration import _shape_calculator_pool, _converter_pool
-    from .._supported_operators import sklearn_operator_name_map
-
-    cls = {}
-
-    for skl_obj, name in sklearn_operator_name_map.items():
-        if skl_obj is None:
-            continue
-        conv = _converter_pool[name]
-        shape_calc = _shape_calculator_pool[name]
-        skl_name = skl_obj.__name__
-        doc = ["OnnxOperatorMixin for **{}**".format(skl_name), ""]
-        if conv.__doc__:
-            doc.append(textwrap.dedent(conv.__doc__))
-        doc = "\n".join(doc)
-        prefix = "Sklearn" if "sklearn" in str(skl_obj) else ""
-        class_name = "Onnx" + prefix + skl_name
-        try:
-            cl = ClassFactorySklearn(skl_obj, class_name,
-                                     doc, conv, shape_calc,
-                                     name)
-        except TypeError:
-            continue
-        cls[class_name] = cl
-    return cls
-
-
-def _update_module():
-    """
-    Dynamically updates the module with operators defined
-    by *ONNX*.
-    """
-    res = dynamic_class_creation_sklearn()
-    this = sys.modules[__name__]
-    for k, v in res.items():
-        setattr(this, k, v)
-
-
-def find_class(skl_cl):
-    """
-    Finds the corresponding :class:`OnnxSubGraphOperatorMixin`
-    class to *skl_cl*.
-    """
-    name = skl_cl.__name__
-    prefix = "OnnxSklearn"
-    full_name = prefix + name
-    this = sys.modules[__name__]
-    if not hasattr(this, full_name):
-        available = sorted(filter(lambda n: prefix in n, sys.modules))
-        raise RuntimeError(
-            "Unable to find a class for '{}' in\n{}".format(
-                skl_cl.__name__, "\n".join(available)))
-    cl = getattr(this, full_name)
-    if "automation" in str(cl):
-        raise RuntimeError("Dynamic operation issue with class "
-                           "name '{}' from '{}'.".format(cl, __name__))
-    return cl
-
-
-
[docs]class OnnxSklearnPipeline(Pipeline, OnnxSubGraphOperatorMixin): - """ - Combines `Pipeline - <https://scikit-learn.org/stable/modules/generated/ - sklearn.pipeline.Pipeline.html>`_ and - :class:`OnnxSubGraphOperatorMixin`. - """ - - def __init__(self, steps, memory=None, verbose=False, op_version=None): - Pipeline.__init__(self, steps=steps, memory=memory, verbose=verbose) - OnnxSubGraphOperatorMixin.__init__(self) - self.op_version = op_version
- - -if ColumnTransformer is not None: - -
[docs] class OnnxSklearnColumnTransformer(ColumnTransformer, - OnnxSubGraphOperatorMixin): - """ - Combines `ColumnTransformer - <https://scikit-learn.org/stable/modules/generated/ - sklearn.compose.ColumnTransformer.html>`_ and - :class:`OnnxSubGraphOperatorMixin`. - """ - - def __init__(self, op_version=None): - self.op_version = op_version
- - -
[docs]class OnnxSklearnFeatureUnion(FeatureUnion, OnnxSubGraphOperatorMixin): - """ - Combines `FeatureUnion - <https://scikit-learn.org/stable/modules/generated/ - sklearn.pipeline.FeatureUnion.html>`_ and - :class:`OnnxSubGraphOperatorMixin`. - """ - - def __init__(self, op_version=None): - self.op_version = op_version
- - -_update_module() -
+ skl2onnx.algebra.sklearn_ops - sklearn-onnx 1.14.0 documentation

Source code for skl2onnx.algebra.sklearn_ops

+# SPDX-License-Identifier: Apache-2.0
+
+"""
+Placeholder for the OnnxSklearn* wrapper classes generated for *scikit-learn* models.
+"""
+import sys
+import textwrap
+from sklearn.pipeline import Pipeline, FeatureUnion
+try:
+    from sklearn.compose import ColumnTransformer
+except ImportError:
+    # ColumnTransformer was introduced in 0.20.
+    ColumnTransformer = None
+from .onnx_subgraph_operator_mixin import OnnxSubGraphOperatorMixin
+
+
+def ClassFactorySklearn(skl_obj, class_name, doc, conv, shape_calc, alias):
+    from .onnx_subgraph_operator_mixin import OnnxSubGraphOperatorMixin
+
+    newclass = type(class_name, (OnnxSubGraphOperatorMixin, skl_obj),
+                    {'__doc__': doc,
+                     'operator_name': skl_obj.__name__,
+                     '_fct_converter': conv,
+                     '_fct_shape_calc': shape_calc,
+                     'input_range': [1, 1e9],
+                     'output_range': [1, 1e9],
+                     'op_version': None,
+                     'alias': alias,
+                     '__module__': __name__})
+    return newclass
+
+
+def dynamic_class_creation_sklearn():
+    """
+    Automatically generates a class for each registered converter.
+    """
+    from ..common._registration import _shape_calculator_pool, _converter_pool
+    from .._supported_operators import sklearn_operator_name_map
+
+    cls = {}
+
+    for skl_obj, name in sklearn_operator_name_map.items():
+        if skl_obj is None:
+            continue
+        conv = _converter_pool[name]
+        shape_calc = _shape_calculator_pool[name]
+        skl_name = skl_obj.__name__
+        doc = ["OnnxOperatorMixin for **{}**".format(skl_name), ""]
+        if conv.__doc__:
+            doc.append(textwrap.dedent(conv.__doc__))
+        doc = "\n".join(doc)
+        prefix = "Sklearn" if "sklearn" in str(skl_obj) else ""
+        class_name = "Onnx" + prefix + skl_name
+        try:
+            cl = ClassFactorySklearn(skl_obj, class_name,
+                                     doc, conv, shape_calc,
+                                     name)
+        except TypeError:
+            continue
+        cls[class_name] = cl
+    return cls
+
+
+def _update_module():
+    """
+    Dynamically updates the module with the classes created
+    by :func:`dynamic_class_creation_sklearn`.
+    """
+    res = dynamic_class_creation_sklearn()
+    this = sys.modules[__name__]
+    for k, v in res.items():
+        setattr(this, k, v)
+
+
+def find_class(skl_cl):
+    """
+    Finds the :class:`OnnxSubGraphOperatorMixin` subclass
+    corresponding to *skl_cl*.
+    """
+    name = skl_cl.__name__
+    prefix = "OnnxSklearn"
+    full_name = prefix + name
+    this = sys.modules[__name__]
+    if not hasattr(this, full_name):
+        available = sorted(filter(lambda n: prefix in n, sys.modules))
+        raise RuntimeError(
+            "Unable to find a class for '{}' in\n{}".format(
+                skl_cl.__name__, "\n".join(available)))
+    cl = getattr(this, full_name)
+    if "automation" in str(cl):
+        raise RuntimeError("Dynamic operation issue with class "
+                           "name '{}' from '{}'.".format(cl, __name__))
+    return cl
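A lookup sketch, based only on the naming convention implemented above:

from sklearn.pipeline import Pipeline
from skl2onnx.algebra import sklearn_ops

cls = sklearn_ops.find_class(Pipeline)
print(cls.__name__)  # OnnxSklearnPipeline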
+
+
+
[docs]class OnnxSklearnPipeline(Pipeline, OnnxSubGraphOperatorMixin): + """ + Combines `Pipeline + <https://scikit-learn.org/stable/modules/generated/ + sklearn.pipeline.Pipeline.html>`_ and + :class:`OnnxSubGraphOperatorMixin`. + """ + + def __init__(self, steps, memory=None, verbose=False, op_version=None): + Pipeline.__init__(self, steps=steps, memory=memory, verbose=verbose) + OnnxSubGraphOperatorMixin.__init__(self) + self.op_version = op_version
+ + +if ColumnTransformer is not None: + +
[docs] class OnnxSklearnColumnTransformer(ColumnTransformer, + OnnxSubGraphOperatorMixin): + """ + Combines `ColumnTransformer + <https://scikit-learn.org/stable/modules/generated/ + sklearn.compose.ColumnTransformer.html>`_ and + :class:`OnnxSubGraphOperatorMixin`. + """ + + def __init__(self, op_version=None): + self.op_version = op_version
+ + +
[docs]class OnnxSklearnFeatureUnion(FeatureUnion, OnnxSubGraphOperatorMixin): + """ + Combines `FeatureUnion + <https://scikit-learn.org/stable/modules/generated/ + sklearn.pipeline.FeatureUnion.html>`_ and + :class:`OnnxSubGraphOperatorMixin`. + """ + + def __init__(self, op_version=None): + self.op_version = op_version
+ + +_update_module() +
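An end-to-end sketch, assuming OnnxSubGraphOperatorMixin exposes the to_onnx method shown earlier in this diff (opset number and step names are illustrative):

import numpy as np
from sklearn.preprocessing import StandardScaler
from skl2onnx.algebra.sklearn_ops import OnnxSklearnPipeline

pipe = OnnxSklearnPipeline([('scale', StandardScaler())], op_version=15)
X = np.random.rand(10, 3).astype(np.float32)
pipe.fit(X)
onx = pipe.to_onnx(X=X[:1])  # ONNX ModelProto for the fitted pipeline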
+
+ \ No newline at end of file diff --git a/_modules/skl2onnx/common/_container.html b/_modules/skl2onnx/common/_container.html index c7edbdd5c..3aa47e813 100644 --- a/_modules/skl2onnx/common/_container.html +++ b/_modules/skl2onnx/common/_container.html @@ -1,1089 +1,1201 @@ - skl2onnx.common._container — sklearn-onnx 1.11.2 documentation

Source code for skl2onnx.common._container

-# SPDX-License-Identifier: Apache-2.0
-
-
-import inspect
-import re
-import sys
-import traceback
-import warnings
-from logging import getLogger
-import numpy as np
-from scipy.sparse import coo_matrix
-from onnx.defs import onnx_opset_version, get_all_schemas_with_history
-import onnx.onnx_cpp2py_export.defs as C
-from onnxconverter_common.onnx_ops import __dict__ as dict_apply_operation
-from ..proto import TensorProto
-from ..proto.onnx_helper_modified import (
-    make_node, ValueInfoProto, make_tensor, make_attribute
-)
-try:
-    from ..proto import SparseTensorProto
-    from ..proto.onnx_helper_modified import make_sparse_tensor
-except ImportError:
-    # onnx is too old.
-    SparseTensorProto = None
-    make_sparse_tensor = None
-from .utils import get_domain
-
-
-logger = getLogger('skl2onnx')
-
-
-def _get_operation_list():
-    """
-    Investigates this module to extract all ONNX functions
-    which needs to be converted with these functions.
-    """
-    regs = [re.compile("container.add_node[(]'([A-Z][a-zA-Z0-9]*)', "
-                       "\\[?input_name"),
-            re.compile("container.add_node[(]'([A-Z][a-zA-Z0-9]*)', "
-                       "\\[\\]"),
-            re.compile("container.add_node[(]'([A-Z][a-zA-Z0-9]*)', "
-                       "inputs"),
-            re.compile("scope, '([A-Z][a-zA-Z0-9]*)', \\[?input_name"),
-            re.compile("op_type = '([A-Z][a-zA-Z0-9]*)'")]
-    res = {}
-    for k, v in dict_apply_operation.items():
-        if k.startswith("apply_") and callable(v):
-            found = None
-            source = inspect.getsource(v)
-            for reg in regs:
-                g = reg.search(source)
-                if g:
-                    found = g.groups()[0]
-                    break
-            if found is None:
-                continue
-            res[found] = v
-    return res
-
-
-def _build_options(model, defined_options, default_values,
-                   allowed_options, fail):
-    opts = {} if default_values is None else default_values
-    if defined_options is not None:
-        opts.update(defined_options.get(type(model), {}))
-        opts.update(defined_options.get(id(model), {}))
-    if allowed_options not in (None, 'passthrough'):
-        for k, v in opts.items():
-            if k not in allowed_options:
-                if fail:
-                    raise NameError(
-                        "Option '{}' not in {} for class '{}'.".format(
-                            k, list(sorted(allowed_options)),
-                            model.__class__.__name__))
-                return None
-            allowed = allowed_options[k]
-            if allowed is not None and v not in allowed and v is not None:
-                raise ValueError(
-                    "Unexpected value [{!r}] for option '{}'"
-                    " (it must be in {}) for model '{}'.".format(
-                        v, k, allowed, model.__class__.__name__))
-    elif fail and len(opts) != 0 and allowed_options != 'passthrough':
-        raise RuntimeError(
-            "Options {} are not registerd for model '{}'.".format(
-                list(sorted(opts)), model.__class__.__name__))
-    return opts
-
-
-_apply_operation_specific = _get_operation_list()
-
-
-class _WhiteBlackContainer:
-
-    def __init__(self, white_op=None, black_op=None, verbose=0):
-        self._white_op = white_op
-        self._black_op = black_op
-        self.verbose = verbose
-
-    def is_allowed(self, node_type):
-        """
-        Tells if a node is white listed or not black listed.
-        """
-        if isinstance(node_type, (list, tuple, set)):
-            return all(map(self.is_allowed, node_type))
-        try:
-            self.check_white_black_list(node_type)
-            return True
-        except RuntimeError:
-            return False
-
-    def check_white_black_list(self, node_type):
-        """
-        Checks a node type is allowed according to white
-        and black lists.
-        """
-        if self._white_op:
-            if node_type not in self._white_op:
-                raise RuntimeError(
-                    "Operator '{}' is not white listed.".format(node_type))
-        if self._black_op:
-            if node_type in self._black_op:
-                raise RuntimeError(
-                    "Operator '{}' is black listed.".format(node_type))
-
-    def debug(self, *args, **kwargs):
-        """
-        Log debug information while converting a model.
-        """
-        logger.debug(*args, **kwargs)
-
-
-class RawModelContainerNode(_WhiteBlackContainer):
-    """
-    This node is the carrier of the model we want to convert.
-    It provides an abstract layer so that our parsing
-    framework can work with models generated by different tools.
-    """
-
-    def __init__(self, raw_model, white_op=None, black_op=None, verbose=0):
-        """
-        :param raw_model: *scikit-learn* model to convert
-        """
-        _WhiteBlackContainer.__init__(
-            self, white_op=white_op, black_op=black_op, verbose=verbose)
-        self._raw_model = raw_model
-
-    @property
-    def raw_model(self):
-        return self._raw_model
-
-    @property
-    def input_names(self):
-        """
-        This function should return a list of strings. Each string
-        corresponds to an input variable name.
-        :return: a list of string
-        """
-        raise NotImplementedError()
-
-    @property
-    def output_names(self):
-        """
-        This function should return a list of strings. Each string
-        corresponds to an output variable name.
-        :return: a list of string
-        """
-        raise NotImplementedError()
-
-
-
[docs]class SklearnModelContainerNode(RawModelContainerNode): - """ - Main container for one *scikit-learn* model. - Every converter adds nodes to an existing container - which is converted into a *ONNX* graph by an instance of - :class:`Topology <skl2onnx.common._topology.Topology>`. - """ - - def __init__(self, sklearn_model, white_op=None, black_op=None, - verbose=0): - super(SklearnModelContainerNode, self).__init__( - sklearn_model, white_op=white_op, black_op=black_op, - verbose=verbose) - # Scikit-learn models have no input and output specified, - # so we create them and store them in this container. - self._inputs = [] - self._outputs = [] - - @property - def input_names(self): - return [variable.onnx_name for variable in self._inputs] - - @property - def output_names(self): - return [variable.onnx_name for variable in self._outputs] - - def add_input(self, variable): - # The order of adding variables matters. The final model's - # input names are sequentially added as this list - if variable not in self._inputs: - self._inputs.append(variable) - - def add_output(self, variable): - # The order of adding variables matters. The final model's - # output names are sequentially added as this list - if variable not in self._outputs: - self._outputs.append(variable)
- - -
[docs]class ModelComponentContainer(_WhiteBlackContainer): - """ - In the conversion phase, this class is used to collect all materials - required to build an *ONNX* *GraphProto*, which is encapsulated in a - *ONNX* *ModelProto*. - """ - - def __init__(self, target_opset, options=None, registered_models=None, - white_op=None, black_op=None, verbose=0): - """ - :param target_opset: number, for example, 7 for *ONNX 1.2*, and - 8 for *ONNX 1.3*. - :param options: see :ref:`l-conv-options` - :param registered_models: registered models - :param white_op: white list of ONNX nodes allowed - while converting a pipeline, if empty, all are allowed - :param black_op: black list of ONNX nodes allowed - while converting a pipeline, if empty, none are blacklisted - :param verbose: display information while converting - """ - _WhiteBlackContainer.__init__( - self, white_op=white_op, black_op=black_op, verbose=verbose) - # Inputs of ONNX graph. They are ValueInfoProto in ONNX. - self.inputs = [] - # Outputs of ONNX graph. They are ValueInfoProto in ONNX. - self.outputs = [] - # ONNX tensors (type: TensorProto). They are initializers of - # ONNX GraphProto. - self.initializers = [] - self.initializers_strings = {} - # Intermediate variables in ONNX computational graph. They are - # ValueInfoProto in ONNX. - self.value_info = [] - # ONNX nodes (type: NodeProto) used to define computation - # structure - self.nodes = [] - # ONNX operators' domain-version pair set. They will be added - # into opset_import field in the final ONNX model. - self.node_domain_version_pair_sets = set() - # The targeted ONNX operator set (referred to as opset) that - # matches the ONNX version. - if isinstance(target_opset, dict): - self.target_opset_all = target_opset - self.target_opset = target_opset.get('', None) - else: - self.target_opset = target_opset - self.target_opset_all = {'': target_opset} - # Additional options given to converters. - self.options = options - # All registered models. - self.registered_models = registered_models - - def swap_names(self, old_name, new_name): - """ - Swaps variables names. - - :param old_name: old name - :param new_name: new name - :return: list of impacted objects - """ - exc_list = {'Scan', 'Loop', 'If'} - for node in self.nodes: - if node.op_type not in exc_list: - continue - if (old_name in node.input or old_name in node.output or - new_name in node.input or new_name in node.output): - raise NotImplementedError( - "Unable to handle subgraphs for node type %r." 
- "(%r, %r)" % (node.op_type, old_name, new_name)) - res = [] - - for inp in self.inputs: - if inp.name == old_name: - inp.name = new_name - res.append(('Io', inp)) - elif inp.name == new_name: - inp.name = old_name - res.append(('In', inp)) - - for inp in self.outputs: - if inp.name == old_name: - inp.name = new_name - res.append(('Oo', inp)) - elif inp.name == new_name: - inp.name = old_name - res.append(('On', inp)) - - for inp in self.initializers: - if inp.name == old_name: - inp.name = new_name - res.append(('-o', inp)) - elif inp.name == new_name: - inp.name = old_name - res.append(('-n', inp)) - - for node in self.nodes: - modified = False - new_input = [] - for name in node.input: - if name == old_name: - name = new_name - modified = True - elif name == new_name: - name = old_name - modified = True - new_input.append(name) - new_output = [] - for name in node.output: - if name == old_name: - name = new_name - modified = True - elif name == new_name: - name = old_name - modified = True - new_output.append(name) - if modified: - if node.op_type in exc_list: - raise NotImplementedError( - "Unable to handle subgraphs for node type %r." - "" % node.op_type) - node.input[:] = new_input[:] - node.output[:] = new_output[:] - res.append(("n-", node)) - return res - - def __str__(self): - """ - Shows internal information. - """ - rows = [] - if self.inputs: - rows.append("INPUTS") - for inp in self.inputs: - rows.append( - " " + str(inp).replace(" ", "").replace("\n", " ")) - if self.outputs: - rows.append("OUTPUTS") - for out in self.outputs: - rows.append( - " " + str(out).replace(" ", "").replace("\n", " ")) - if self.initializers: - rows.append("INITIALIZERS") - for ini in self.initializers: - rows.append( - " " + str(ini).replace(" ", "").replace("\n", " ")) - if self.value_info: - rows.append("NODES") - for val in self.value_info: - rows.append( - " " + str(val).replace(" ", "").replace("\n", " ")) - if self.nodes: - rows.append("PROTO") - for nod in self.nodes: - rows.append( - " " + str(nod).replace(" ", "").replace("\n", " ")) - return "\n".join(rows) - - def _make_value_info(self, variable): - value_info = ValueInfoProto() - value_info.name = variable.full_name - value_info.type.CopyFrom(variable.type.to_onnx_type()) - if variable.type.doc_string: - value_info.doc_string = variable.type.doc_string - return value_info - -
[docs] def add_input(self, variable): - """ - Adds our *Variable* object defined _parser.py into the the input - list of the final ONNX model. - - :param variable: The Variable object to be added - """ - self.inputs.append(self._make_value_info(variable))
- -
[docs] def add_output(self, variable): - """ - Adds our *Variable* object defined *_parser.py* into the the - output list of the final ONNX model. - - :param variable: The Variable object to be added - """ - self.outputs.append(self._make_value_info(variable))
- - def add_options(self, model_id, options): - """ - Adds an option, for example, - ``add_options(id(clr), {'raw_scores': True})`` - tells the converter associated to ``clr`` to - use raw score instead of probabilities. - - :param model_id: class or ``id(instance)`` - :param options: dictionary with the new values - """ - if options is None: - return - if self.options is None: - self.options = {} - if model_id not in self.options: - self.options[model_id] = None - if self.options[model_id] is None: - self.options[model_id] = {} - self.options[model_id].update(options) - -
[docs] def add_initializer(self, name, onnx_type, shape, content): - """ - Adds a *TensorProto* into the initializer list of the final - ONNX model. - - :param name: Variable name in the produced ONNX model. - :param onnx_type: Element types allowed in ONNX tensor, e.g., - TensorProto.FLOAT and TensorProto.STRING. - :param shape: Tensor shape, a list of integers. - :param content: Flattened tensor values (i.e., a float list - or a float array). - :return: created tensor - """ - logger.debug("[Init] %r, %r, %r", name, onnx_type, shape) - sparse_tensor = None - tensor = None - - cached_value = None - if isinstance(content, TensorProto): - tensor = TensorProto() - tensor.data_type = content.data_type - tensor.name = name - tensor.raw_data = content.raw_data - tensor.dims.extend(content.dims) - elif shape is None and isinstance( - content, (np.float32, np.float64, np.int32, - np.int64, float, np.int8, np.uint8, - np.bool_, np.str_, str)): - tensor = make_tensor(name, onnx_type, [], [content]) - elif (SparseTensorProto is not None and - isinstance(content, SparseTensorProto)): - raise NotImplementedError("Not implemented yet.") - elif shape is None: - tensor = make_attribute(name, content) - elif isinstance(content, coo_matrix): - if SparseTensorProto is None: - raise RuntimeError( - "Sparse matrices require SparseTensorProto. Update onnx.") - values_tensor = make_tensor( - name + "_v", data_type=onnx_type, - dims=(len(content.data), ), vals=content.data) - indices = [i * content.shape[1] + j - for i, j in zip(content.row, content.col)] - indices_tensor = make_tensor( - name=name + "_i", data_type=TensorProto.INT64, - dims=(len(indices), ), vals=indices) - dense_shape = list(content.shape) - sparse_tensor = make_sparse_tensor( - values_tensor, indices_tensor, dense_shape) - - # cached value: same without names - values_tensor = make_tensor( - "_v", data_type=onnx_type, - dims=(len(content.data), ), vals=content.data) - indices_tensor = make_tensor( - name="_i", data_type=TensorProto.INT64, - dims=(len(indices), ), vals=indices) - cached_value = make_sparse_tensor( - values_tensor, indices_tensor, dense_shape) - - else: - if any(d is None for d in shape): - raise ValueError('Shape of initializer cannot contain None.') - if (hasattr(content, 'dtype') and - content.dtype in (bool, np.bool_)): - content = content.astype(np.int32) - try: - tensor = make_tensor(name, onnx_type, shape, content) - except TypeError as e: - raise TypeError( - "Unable to make a tensor name=%r " - "onnx_type=%r shape=%r content-type=%r." 
% ( - name, onnx_type, shape, type(content))) from e - - if tensor is not None: - if cached_value is None: - name = tensor.name - tensor.name = "tensor" - content = tensor.SerializeToString() - tensor.name = name - else: - content = cached_value.SerializeToString() - cached_name = self.initializers_strings.get(content, None) - if cached_name is None: - self.initializers_strings[content] = name - self.initializers.append(tensor) - return tensor - - self.add_node( - 'Identity', cached_name, name, op_version=self.target_opset, - name=name + '_op') - return name - - if sparse_tensor is not None: - content = cached_value.SerializeToString() - cached_name = self.initializers_strings.get(content, None) - if cached_name is None: - self.initializers_strings[content] = name - self.add_node( - 'Constant', [], [name], sparse_value=sparse_tensor, - op_version=self.target_opset, name=name + '_op') - return sparse_tensor - - self.add_node( - 'Identity', cached_name, name, op_version=self.target_opset, - name=name + '_op') - return name - - raise RuntimeError( - "Either tensor or sparse_tensor should be defined.")
- - def add_value_info(self, variable): - self.value_info.append(self._make_value_info(variable)) - - def _check_operator(self, op_type): - """ - Checks that if *op_type* is one of the operators defined in - :mod:`skl2onnx.common._apply_container`, then it was called - from a function defined in this submodule by looking - into the callstack. The test is enabled for *python >= 3.6*. - """ - if (op_type in _apply_operation_specific and - sys.version_info[:2] >= (3, 6)): - tb = traceback.extract_stack() - operation = [] - fct = _apply_operation_specific[op_type] - skl2 = False - for b in tb: - if "_apply_operation" in b.filename and b.name == fct.__name__: - operation.append(b) - if not skl2 and "skl2onnx" in b.filename: - skl2 = True - if skl2 and len(operation) == 0: - raise RuntimeError( - "Operator '{0}' should be added with function " - "'{1}' in submodule _apply_operation.".format( - op_type, fct.__name__)) - self.check_white_black_list(op_type) - -
[docs] def add_node(self, op_type, inputs, outputs, op_domain='', op_version=None, - name=None, **attrs): - """ - Adds a *NodeProto* into the node list of the final ONNX model. - If the input operator's domain-version information cannot be - found in our domain-version pool (a Python set), we may add it. - - :param op_type: A string (e.g., Pool and Conv) indicating the - type of the NodeProto - :param inputs: A list of strings. They are the input variables' - names of the considered NodeProto - :param outputs: A list of strings. They are the output - variables' names of the considered NodeProto - :param op_domain: The domain name (e.g., ai.onnx.ml) of the - operator we are trying to add. - :param op_version: The version number (e.g., 0 and 1) of the - operator we are trying to add. - :param name: name of the node, this name cannot be empty - :param attrs: A Python dictionary. Keys and values are - attributes' names and attributes' values, - respectively. - """ - if name is None or not isinstance( - name, str) or name == '': - name = "N%d" % len(self.nodes) - existing_names = set(n.name for n in self.nodes) - if name in existing_names: - name += "-N%d" % len(self.nodes) - - if op_domain is None: - op_domain = get_domain() - self._check_operator(op_type) - if op_version is None: - op_version = self._get_op_version(op_domain, op_type) - - if isinstance(inputs, str): - inputs = [inputs] - if isinstance(outputs, str): - outputs = [outputs] - logger.debug( - "[Node] %r - %r -> %r (name=%r)", - op_type, ",".join(inputs), ",".join(outputs), name) - try: - common = set(inputs) & set(outputs) - except TypeError as e: - raise TypeError( - "inputs or outputs are wrong, inputs=%r, outputs=%r, node=%r." - "" % (inputs, outputs, op_type)) from e - if common: - raise RuntimeError( - "inputs and outputs cannot have " - "variables in common {} in node '{}' " - "with name '{}'.".format(common, op_type, name)) - if not isinstance(inputs, list) or not all( - isinstance(s, str) for s in inputs): - type_list = ','.join(list(str(type(s)) for s in inputs)) - raise ValueError('Inputs must be a list of string but get [%s]' - % type_list) - if (not isinstance(outputs, list) or - not all(isinstance(s, str) for s in outputs)): - type_list = ','.join(list(str(type(s)) for s in outputs)) - raise ValueError('Outputs must be a list of string but get [%s]' - % type_list) - upd = {} - dtypes = set() - for k, v in attrs.items(): - if v is None: - raise ValueError( - 'Failed to create ONNX node. Undefined ' - 'attribute pair (%s, %s) found for type %r and ' - 'version %r' % ( - k, v, op_type, op_version)) - if isinstance(v, np.ndarray): - upd[k] = v - dtypes.add(v.dtype) - - if upd: - attrs.update(upd) - if 'dtype' in attrs: - raise RuntimeError("dtype should not be a parameter.") - if len(dtypes) == 0: - dtype = None - elif len(dtypes) == 1: - dtype = list(dtypes)[0] - elif (np.float32 in dtypes and np.float64 in dtypes): - raise RuntimeError( - "Unable to select a dtype among {}.".format(dtypes)) - else: - dtype = None - try: - node = make_node(op_type, inputs, outputs, name=name, - _dtype=dtype, **attrs) - except ValueError as e: - raise ValueError("Unable to create node '{}' with name='{}'." 
- "".format(op_type, name)) from e - node.domain = op_domain - - self.node_domain_version_pair_sets.add((op_domain, op_version)) - self.nodes.append(node) - if (self.target_opset is not None and - op_version is not None and - op_version > self.target_opset_any_domain(op_domain)): - raise RuntimeError( - "Opset number {} is higher than targeted opsets {} for " - "node type '{}' name='{}' input={} " - "output={} (domain='{}').".format( - op_version, self.target_opset_all, - node.op_type, node.name, - node.input, node.output, op_domain))
- - def target_opset_any_domain(self, domain): - target_opset = self.target_opset_all - if isinstance(target_opset, dict): - if domain in target_opset: - to = target_opset[domain] - else: - to = None - if to is None and domain == '': - to = onnx_opset_version() - if to is None: - smap = C.schema_version_map() - if domain in smap: - to = smap[domain][1] - if to is not None: - return to - # The domain is not registered in onnx, it is probably - # a custom domain. We assume the version is one. - return 1 - return self.target_opset - - @property - def target_opset_onnx(self): - return self.target_opset_any_domain('') - - def _get_op_version(self, domain, op_type): - """ - Determines the highest version of operator - *op_type* below or equal to *target_opset*. - """ - if not hasattr(self, '_op_versions'): - self._build_op_version() - key = domain, op_type - vers = self._op_versions.get(key, None) - if vers is None: - warnings.warn( - "Unable to find operator '{}' in domain '{}' in ONNX, " - "op_version is forced to 1.".format( - op_type, domain)) - vers = [1] - highest = self.target_opset_any_domain(domain) - pos = len(vers) - 1 - while pos >= 0: - if vers[pos] <= highest: - return vers[pos] - pos -= 1 - raise RuntimeError( - "Unable to find a suitable version for operator '{}' " - "in domain '{}'. Available versions: {}.".format( - op_type, domain, vers)) - - def _build_op_version(self): - res = {} - for schema in get_all_schemas_with_history(): - dom = schema.domain - name = schema.name - vers = schema.since_version - if (dom, name) not in res: - res[dom, name] = set() - res[dom, name].add(vers) - self._op_versions = {} - for k, v in res.items(): - self._op_versions[k] = list(sorted(v)) - - def _get_allowed_options(self, model): - if self.registered_models is not None: - if inspect.isfunction(model): - if model not in self.registered_models['aliases']: - return None - alias = self.registered_models['aliases'][model] - elif hasattr(model, 'alias'): - alias = model.alias - else: - if type(model) not in self.registered_models['aliases']: - return {} - alias = self.registered_models['aliases'][type(model)] - conv = self.registered_models['conv'][alias] - allowed = conv.get_allowed_options() - if allowed is None: - return {} - return allowed - clname = (str(model) if inspect.isfunction(model) - else model.__class__.__name__) - raise NotImplementedError( - "No registered models, no known allowed options " - "for model '{}'.".format(clname)) - - def validate_options(self, operator): - """ - Validates every operator allows the options - given by the user at converter time - for an operator. - """ - skl_op = operator.raw_operator - self.get_options(skl_op) - - def get_options(self, model, default_values=None, fail=True): - """ - Returns additional options for a model. - It first looks by class then by id (``id(model)``). - :param model: model being converted - :param default_values: default options (it is modified by - the function) - :param fail: fails if options not found - :return: dictionary - """ - return _build_options( - model, self.options, default_values, - self._get_allowed_options(model), fail=fail) - - def has_options(self, model, option_name): - """ - Tells if a model allows one specific options. 
- - :param model: model being converted - :return: boolean - """ - opts = self._get_allowed_options(model) - return option_name in opts - - def ensure_topological_order(self): - """ - Ensures and modifies the order of nodes to have - a topological order (every node in the list - can only be an input for a node later in this list). - The function raises an exception if a cycle is detected. - """ - order = {} - for inp in self.inputs: - name = inp.name - order[name] = 0 - for inp in self.initializers: - name = inp.name - order[name] = 0 - - n_iter = 0 - missing_ops = [] - while n_iter < len(self.nodes) * 2: - n_iter += 1 - missing_names = set() - missing_ops = [] - for node in self.nodes: - maxi = 0 - for name in node.input: - if name in order: - maxi = max(maxi, order[name]) - else: - maxi = None - missing_names.add(name) - break - if maxi is None: - missing_ops.append(node) - continue - key = id(node) - if key in order: - continue - maxi += 1 - order[key] = maxi - maxi += 1 - for name in node.output: - if name in order: - raise RuntimeError( - "Unable to sort a node (cycle). An output was " - "already ordered with name %r (iteration=%r)." - "" % (name, n_iter)) - order[name] = maxi - if len(missing_names) == 0: - continue - - if len(missing_ops) > 0: - def nstr(name): - if name in order: - return "%s#%d" % (name, order[name]) - return name - rows = ["%s(%s) -> [%s]" % ( - n.name or n.op_type, - ', '.join(map(nstr, n.input)), - ', '.join(n.output)) - for n in missing_ops] - rows.insert(0, "") - rows.append("--") - rows.append("--all-nodes--") - rows.append("--") - rows.extend("%s|%s(%s) -> [%s]" % ( - n.op_type, n.name or n.op_type, - ', '.join(map(nstr, n.input)), - ', '.join(n.output)) - for n in self.nodes) - raise RuntimeError( - "After %d iterations for %d nodes, still unable " - "to sort names %r. The graph may be disconnected. " - "List of operators: %s" % ( - n_iter, len(self.nodes), missing_names, - "\n".join(rows))) - - # Update order - topo = [(order[id(node)], str(id(node))) for node in self.nodes] - topo.sort() - map_nodes = {str(id(node)): node for node in self.nodes} - self.nodes = [map_nodes[_[1]] for _ in topo]
-
+skl2onnx.common._container - sklearn-onnx 1.14.0 documentation

Source code for skl2onnx.common._container

+# SPDX-License-Identifier: Apache-2.0
+
+
+import inspect
+import re
+import sys
+import traceback
+import warnings
+from logging import getLogger
+import numpy as np
+from scipy.sparse import coo_matrix
+from onnx.defs import onnx_opset_version, get_all_schemas_with_history
+import onnx.onnx_cpp2py_export.defs as C
+from onnxconverter_common.onnx_ops import __dict__ as dict_apply_operation
+from ..proto import TensorProto
+from ..proto.onnx_helper_modified import (
+    make_node, ValueInfoProto, make_tensor, make_attribute
+)
+try:
+    from ..proto import SparseTensorProto
+    from ..proto.onnx_helper_modified import make_sparse_tensor
+except ImportError:
+    # onnx is too old.
+    SparseTensorProto = None
+    make_sparse_tensor = None
+from .utils import get_domain
+
+
+logger = getLogger('skl2onnx')
+
+
+def _get_operation_list(use_shortlist=True):
+    """
+    Investigates this module to extract all ONNX functions
+    which needs to be converted with these functions.
+    """
+    # Reduce the scope of method _check_operator,
+    # it retrieves the stack trace and it takes a
+    # significant amount of time.
+    # This was mostly used to catch errors difficult to catch
+    # otherwise.
+    if use_shortlist:
+        shortlist = {'Clip', 'Normalizer', 'Upsample'}
+    else:
+        shortlist = None
+    regs = [re.compile("container.add_node[(]'([A-Z][a-zA-Z0-9]*)', "
+                       "\\[?input_name"),
+            re.compile("container.add_node[(]'([A-Z][a-zA-Z0-9]*)', "
+                       "\\[\\]"),
+            re.compile("container.add_node[(]'([A-Z][a-zA-Z0-9]*)', "
+                       "inputs"),
+            re.compile("scope, '([A-Z][a-zA-Z0-9]*)', \\[?input_name"),
+            re.compile("op_type = '([A-Z][a-zA-Z0-9]*)'")]
+    res = {}
+    for k, v in dict_apply_operation.items():
+        if k.startswith("apply_") and callable(v):
+            found = None
+            source = inspect.getsource(v)
+            for reg in regs:
+                g = reg.search(source)
+                if g:
+                    found = g.groups()[0]
+                    break
+            if found is None:
+                continue
+            if shortlist and found not in shortlist:
+                continue
+            res[found] = v
+    return res
+
+
+def _build_options(model, defined_options, default_values,
+                   allowed_options, fail):
+    opts = {} if default_values is None else default_values
+    if defined_options is not None:
+        opts.update(defined_options.get(type(model), {}))
+        opts.update(defined_options.get(id(model), {}))
+    if allowed_options not in (None, 'passthrough'):
+        for k, v in opts.items():
+            if k not in allowed_options:
+                if fail:
+                    raise NameError(
+                        "Option '{}' not in {} for class '{}'.".format(
+                            k, list(sorted(allowed_options)),
+                            model.__class__.__name__))
+                return None
+            allowed = allowed_options[k]
+            if allowed is not None and v not in allowed and v is not None:
+                raise ValueError(
+                    "Unexpected value [{!r}] for option '{}'"
+                    " (it must be in {}) for model '{}'.".format(
+                        v, k, allowed, model.__class__.__name__))
+    elif fail and len(opts) != 0 and allowed_options != 'passthrough':
+        raise RuntimeError(
+            "Options {} are not registerd for model '{}'.".format(
+                list(sorted(opts)), model.__class__.__name__))
+    return opts
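
A minimal sketch of how ``_build_options`` resolves per-model options (the ``LogisticRegression`` instance and the ``allowed`` dictionary below are only illustrative assumptions, not values taken from this page)::

    from sklearn.linear_model import LogisticRegression
    from skl2onnx.common._container import _build_options

    model = LogisticRegression()
    # options registered for this specific instance, keyed by id(model)
    defined = {id(model): {'zipmap': False}}
    # what the converter declares as acceptable option values
    allowed = {'zipmap': [True, False, 'columns'], 'raw_scores': [True, False]}
    opts = _build_options(model, defined, None, allowed, fail=True)
    # opts == {'zipmap': False}
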
+
+
+_apply_operation_specific = _get_operation_list()
+
+
+class _WhiteBlackContainer:
+
+    def __init__(self, white_op=None, black_op=None, verbose=0):
+        self._white_op = white_op
+        self._black_op = black_op
+        self.verbose = verbose
+
+    def is_allowed(self, node_type):
+        """
+        Tells if a node type is white listed and not black listed.
+        """
+        if isinstance(node_type, (list, tuple, set)):
+            return all(map(self.is_allowed, node_type))
+        try:
+            self.check_white_black_list(node_type)
+            return True
+        except RuntimeError:
+            return False
+
+    def check_white_black_list(self, node_type):
+        """
+        Checks that a node type is allowed according to the
+        white and black lists.
+        """
+        if self._white_op:
+            if node_type not in self._white_op:
+                raise RuntimeError(
+                    "Operator '{}' is not white listed.".format(node_type))
+        if self._black_op:
+            if node_type in self._black_op:
+                raise RuntimeError(
+                    "Operator '{}' is black listed.".format(node_type))
+
+    def debug(self, *args, **kwargs):
+        """
+        Log debug information while converting a model.
+        """
+        logger.debug(*args, **kwargs)
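
A minimal sketch of the white/black list check (the operator names are arbitrary)::

    from skl2onnx.common._container import _WhiteBlackContainer

    ctn = _WhiteBlackContainer(white_op={'Cast', 'MatMul'})
    ctn.is_allowed('MatMul')            # True, white listed
    ctn.is_allowed('Scan')              # False, not in the white list
    ctn.check_white_black_list('Scan')  # raises RuntimeError
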
+
+
+class RawModelContainerNode(_WhiteBlackContainer):
+    """
+    This node is the carrier of the model we want to convert.
+    It provides an abstract layer so that our parsing
+    framework can work with models generated by different tools.
+    """
+
+    def __init__(self, raw_model, white_op=None, black_op=None, verbose=0):
+        """
+        :param raw_model: *scikit-learn* model to convert
+        """
+        _WhiteBlackContainer.__init__(
+            self, white_op=white_op, black_op=black_op, verbose=verbose)
+        self._raw_model = raw_model
+
+    @property
+    def raw_model(self):
+        return self._raw_model
+
+    @property
+    def input_names(self):
+        """
+        This function should return a list of strings. Each string
+        corresponds to an input variable name.
+        :return: a list of strings
+        """
+        raise NotImplementedError()
+
+    @property
+    def output_names(self):
+        """
+        This function should return a list of strings. Each string
+        corresponds to an output variable name.
+        :return: a list of string
+        """
+        raise NotImplementedError()
+
+
+
+class SklearnModelContainerNode(RawModelContainerNode):
+    """
+    Main container for one *scikit-learn* model.
+    Every converter adds nodes to an existing container
+    which is converted into an *ONNX* graph by an instance of
+    :class:`Topology <skl2onnx.common._topology.Topology>`.
+    """
+
+    def __init__(self, sklearn_model, white_op=None, black_op=None,
+                 verbose=0):
+        super(SklearnModelContainerNode, self).__init__(
+            sklearn_model, white_op=white_op, black_op=black_op,
+            verbose=verbose)
+        # Scikit-learn models have no input and output specified,
+        # so we create them and store them in this container.
+        self._inputs = []
+        self._outputs = []
+
+    @property
+    def input_names(self):
+        return [variable.onnx_name for variable in self._inputs]
+
+    @property
+    def output_names(self):
+        return [variable.onnx_name for variable in self._outputs]
+
+    def add_input(self, variable):
+        # The order of adding variables matters. The final model's
+        # input names are sequentially added as this list.
+        if variable not in self._inputs:
+            self._inputs.append(variable)
+
+    def add_output(self, variable):
+        # The order of adding variables matters. The final model's
+        # output names are sequentially added as this list.
+        if variable not in self._outputs:
+            self._outputs.append(variable)
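
For context, this container is normally not instantiated by hand: it is created and filled while ``convert_sklearn`` parses a fitted model. A minimal sketch with an arbitrary model and input name::

    import numpy as np
    from sklearn.linear_model import LogisticRegression
    from skl2onnx import convert_sklearn
    from skl2onnx.common.data_types import FloatTensorType

    X = np.array([[0., 1.], [1., 0.], [2., 2.]], dtype=np.float32)
    y = np.array([0, 1, 1])
    model = LogisticRegression().fit(X, y)
    # parsing wraps `model` into a SklearnModelContainerNode and records
    # the declared inputs before converters start adding ONNX nodes
    onx = convert_sklearn(
        model, initial_types=[('input', FloatTensorType([None, 2]))])
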
+ + +
[docs]class ModelComponentContainer(_WhiteBlackContainer): + """ + In the conversion phase, this class is used to collect all materials + required to build an *ONNX* *GraphProto*, which is encapsulated in a + *ONNX* *ModelProto*. + """ + + def __init__(self, target_opset, options=None, registered_models=None, + white_op=None, black_op=None, verbose=0): + """ + :param target_opset: number, for example, 7 for *ONNX 1.2*, and + 8 for *ONNX 1.3*. + :param options: see :ref:`l-conv-options` + :param registered_models: registered models + :param white_op: white list of ONNX nodes allowed + while converting a pipeline, if empty, all are allowed + :param black_op: black list of ONNX nodes allowed + while converting a pipeline, if empty, none are blacklisted + :param verbose: display information while converting + """ + _WhiteBlackContainer.__init__( + self, white_op=white_op, black_op=black_op, verbose=verbose) + # Inputs of ONNX graph. They are ValueInfoProto in ONNX. + self.inputs = [] + # Outputs of ONNX graph. They are ValueInfoProto in ONNX. + self.outputs = [] + # ONNX tensors (type: TensorProto). They are initializers of + # ONNX GraphProto. + self.initializers = [] + self.initializers_strings = {} + # Intermediate variables in ONNX computational graph. They are + # ValueInfoProto in ONNX. + self.value_info = [] + # ONNX nodes (type: NodeProto) used to define computation + # structure + self.nodes = [] + # ONNX operators' domain-version pair set. They will be added + # into opset_import field in the final ONNX model. + self.node_domain_version_pair_sets = set() + # The targeted ONNX operator set (referred to as opset) that + # matches the ONNX version. + if isinstance(target_opset, dict): + self.target_opset_all = target_opset + self.target_opset = target_opset.get('', None) + else: + self.target_opset = target_opset + self.target_opset_all = {'': target_opset} + # Additional options given to converters. + self.options = options + # All registered models. + self.registered_models = registered_models + + def swap_names(self, old_name, new_name): + """ + Swaps variables names. + + :param old_name: old name + :param new_name: new name + :return: list of impacted objects + """ + exc_list = {'Scan', 'Loop', 'If'} + for node in self.nodes: + if node.op_type not in exc_list: + continue + if (old_name in node.input or old_name in node.output or + new_name in node.input or new_name in node.output): + raise NotImplementedError( + "Unable to handle subgraphs for node type %r." 
+ "(%r, %r)" % (node.op_type, old_name, new_name)) + res = [] + + for inp in self.inputs: + if inp.name == old_name: + inp.name = new_name + res.append(('Io', inp)) + elif inp.name == new_name: + inp.name = old_name + res.append(('In', inp)) + + for inp in self.outputs: + if inp.name == old_name: + inp.name = new_name + res.append(('Oo', inp)) + elif inp.name == new_name: + inp.name = old_name + res.append(('On', inp)) + + for inp in self.initializers: + if inp.name == old_name: + inp.name = new_name + res.append(('-o', inp)) + elif inp.name == new_name: + inp.name = old_name + res.append(('-n', inp)) + + for node in self.nodes: + modified = False + new_input = [] + for name in node.input: + if name == old_name: + name = new_name + modified = True + elif name == new_name: + name = old_name + modified = True + new_input.append(name) + new_output = [] + for name in node.output: + if name == old_name: + name = new_name + modified = True + elif name == new_name: + name = old_name + modified = True + new_output.append(name) + if modified: + if node.op_type in exc_list: + raise NotImplementedError( + "Unable to handle subgraphs for node type %r." + "" % node.op_type) + node.input[:] = new_input[:] + node.output[:] = new_output[:] + res.append(("n-", node)) + return res + + def __str__(self): + """ + Shows internal information. + """ + rows = [] + if self.inputs: + rows.append("INPUTS") + for inp in self.inputs: + rows.append( + " " + str(inp).replace(" ", "").replace("\n", " ")) + if self.outputs: + rows.append("OUTPUTS") + for out in self.outputs: + rows.append( + " " + str(out).replace(" ", "").replace("\n", " ")) + if self.initializers: + rows.append("INITIALIZERS") + for ini in self.initializers: + rows.append( + " " + str(ini).replace(" ", "").replace("\n", " ")) + if self.value_info: + rows.append("NODES") + for val in self.value_info: + rows.append( + " " + str(val).replace(" ", "").replace("\n", " ")) + if self.nodes: + rows.append("PROTO") + for nod in self.nodes: + rows.append( + " " + str(nod).replace(" ", "").replace("\n", " ")) + return "\n".join(rows) + + def _make_value_info(self, variable): + value_info = ValueInfoProto() + value_info.name = variable.full_name + value_info.type.CopyFrom(variable.type.to_onnx_type()) + if variable.type.doc_string: + value_info.doc_string = variable.type.doc_string + return value_info + +
+    def add_input(self, variable):
+        """
+        Adds our *Variable* object defined in *_parser.py* into the input
+        list of the final ONNX model.
+
+        :param variable: The Variable object to be added
+        """
+        self.inputs.append(self._make_value_info(variable))
+ +
+    def add_output(self, variable):
+        """
+        Adds our *Variable* object defined in *_parser.py* into the
+        output list of the final ONNX model.
+
+        :param variable: The Variable object to be added
+        """
+        self.outputs.append(self._make_value_info(variable))
+
+    def add_options(self, model_id, options):
+        """
+        Adds an option, for example,
+        ``add_options(id(clr), {'raw_scores': True})``
+        tells the converter associated to ``clr`` to
+        use raw scores instead of probabilities.
+
+        :param model_id: class or ``id(instance)``
+        :param options: dictionary with the new values
+        """
+        if options is None:
+            return
+        if self.options is None:
+            self.options = {}
+        if model_id not in self.options:
+            self.options[model_id] = None
+        if self.options[model_id] is None:
+            self.options[model_id] = {}
+        self.options[model_id].update(options)
+
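
A small sketch of registering per-instance options on a container (the ``zipmap`` option name is only an example)::

    from sklearn.linear_model import LogisticRegression
    from skl2onnx.common._container import ModelComponentContainer

    model = LogisticRegression()
    container = ModelComponentContainer(target_opset=15)
    # key the options by the instance id so they apply to this model only
    container.add_options(id(model), {'zipmap': False})
    # converters later read these values back through get_options(model)
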
[docs] def add_initializer(self, name, onnx_type, shape, content): + """ + Adds a *TensorProto* into the initializer list of the final + ONNX model. + + :param name: Variable name in the produced ONNX model. + :param onnx_type: Element types allowed in ONNX tensor, e.g., + TensorProto.FLOAT and TensorProto.STRING. + :param shape: Tensor shape, a list of integers. + :param content: Flattened tensor values (i.e., a float list + or a float array). + :return: created tensor + """ + logger.debug("[Init] %r, %r, %r", name, onnx_type, shape) + sparse_tensor = None + tensor = None + + cached_value = None + if isinstance(content, TensorProto): + tensor = TensorProto() + tensor.data_type = content.data_type + tensor.name = name + tensor.raw_data = content.raw_data + tensor.dims.extend(content.dims) + elif shape is None and isinstance( + content, (np.float32, np.float64, np.int32, + np.int64, float, np.int8, np.uint8, + np.bool_, np.str_, str)): + tensor = make_tensor(name, onnx_type, [], [content]) + elif (SparseTensorProto is not None and + isinstance(content, SparseTensorProto)): + raise NotImplementedError("Not implemented yet.") + elif shape is None: + tensor = make_attribute(name, content) + elif isinstance(content, coo_matrix): + if SparseTensorProto is None: + raise RuntimeError( + "Sparse matrices require SparseTensorProto. Update onnx.") + values_tensor = make_tensor( + name + "_v", data_type=onnx_type, + dims=(len(content.data), ), vals=content.data) + indices = [i * content.shape[1] + j + for i, j in zip(content.row, content.col)] + indices_tensor = make_tensor( + name=name + "_i", data_type=TensorProto.INT64, + dims=(len(indices), ), vals=indices) + dense_shape = list(content.shape) + sparse_tensor = make_sparse_tensor( + values_tensor, indices_tensor, dense_shape) + + # cached value: same without names + values_tensor = make_tensor( + "_v", data_type=onnx_type, + dims=(len(content.data), ), vals=content.data) + indices_tensor = make_tensor( + name="_i", data_type=TensorProto.INT64, + dims=(len(indices), ), vals=indices) + cached_value = make_sparse_tensor( + values_tensor, indices_tensor, dense_shape) + + else: + if any(d is None for d in shape): + raise ValueError('Shape of initializer cannot contain None.') + if (hasattr(content, 'dtype') and + content.dtype in (bool, np.bool_)): + content = content.astype(np.int32) + try: + tensor = make_tensor(name, onnx_type, shape, content) + except TypeError as e: + raise TypeError( + "Unable to make a tensor name=%r " + "onnx_type=%r shape=%r content-type=%r." 
% ( + name, onnx_type, shape, type(content))) from e + + if tensor is not None: + if cached_value is None: + name = tensor.name + tensor.name = "tensor" + content = tensor.SerializeToString() + tensor.name = name + else: + content = cached_value.SerializeToString() + cached_name = self.initializers_strings.get(content, None) + if cached_name is None: + self.initializers_strings[content] = name + self.initializers.append(tensor) + return tensor + + self.add_node( + 'Identity', cached_name, name, op_version=self.target_opset, + name=name + '_op') + return name + + if sparse_tensor is not None: + content = cached_value.SerializeToString() + cached_name = self.initializers_strings.get(content, None) + if cached_name is None: + self.initializers_strings[content] = name + self.add_node( + 'Constant', [], [name], sparse_value=sparse_tensor, + op_version=self.target_opset, name=name + '_op') + return sparse_tensor + + self.add_node( + 'Identity', cached_name, name, op_version=self.target_opset, + name=name + '_op') + return name + + raise RuntimeError( + "Either tensor or sparse_tensor should be defined.")
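
A minimal sketch of adding a dense initializer (name, shape and values are arbitrary)::

    from onnx import TensorProto
    from skl2onnx.common._container import ModelComponentContainer

    container = ModelComponentContainer(target_opset=15)
    coef = container.add_initializer(
        'coef', TensorProto.FLOAT, [2, 1], [0.5, -0.5])
    # `coef` is the created TensorProto; a later call with identical
    # content would not duplicate it, an Identity node pointing to the
    # cached initializer is added instead.
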
+ + def add_value_info(self, variable): + self.value_info.append(self._make_value_info(variable)) + + def _check_operator(self, op_type): + """ + Checks that if *op_type* is one of the operators defined in + :mod:`skl2onnx.common._apply_container`, then it was called + from a function defined in this submodule by looking + into the callstack. The test is enabled for *python >= 3.6*. + """ + if (op_type in _apply_operation_specific and + sys.version_info[:2] >= (3, 6)): + tb = traceback.extract_stack() + operation = [] + fct = _apply_operation_specific[op_type] + skl2 = False + for b in tb: + if "_apply_operation" in b.filename and b.name == fct.__name__: + operation.append(b) + if not skl2 and "skl2onnx" in b.filename: + skl2 = True + if skl2 and len(operation) == 0: + raise RuntimeError( + "Operator '{0}' should be added with function " + "'{1}' in submodule _apply_operation.".format( + op_type, fct.__name__)) + self.check_white_black_list(op_type) + +
[docs] def add_node(self, op_type, inputs, outputs, op_domain='', op_version=None, + name=None, **attrs): + """ + Adds a *NodeProto* into the node list of the final ONNX model. + If the input operator's domain-version information cannot be + found in our domain-version pool (a Python set), we may add it. + + :param op_type: A string (e.g., Pool and Conv) indicating the + type of the NodeProto + :param inputs: A list of strings. They are the input variables' + names of the considered NodeProto + :param outputs: A list of strings. They are the output + variables' names of the considered NodeProto + :param op_domain: The domain name (e.g., ai.onnx.ml) of the + operator we are trying to add. + :param op_version: The version number (e.g., 0 and 1) of the + operator we are trying to add. + :param name: name of the node, this name cannot be empty + :param attrs: A Python dictionary. Keys and values are + attributes' names and attributes' values, + respectively. + """ + if ("axes" in attrs and + (attrs["axes"] is None or + not isinstance(attrs["axes"], (list, np.ndarray)))): + raise TypeError( + f"axes must be a list or an array not " + f"{type(attrs['axes'])}.") + if name is None or not isinstance( + name, str) or name == '': + name = f"N{len(self.nodes)}" + existing_names = set(n.name for n in self.nodes) + if name in existing_names: + name += f"-N{len(self.nodes)}" + + if op_domain is None: + op_domain = get_domain() + self._check_operator(op_type) + if op_version is None: + op_version = self._get_op_version(op_domain, op_type) + + if isinstance(inputs, str): + inputs = [inputs] + if isinstance(outputs, str): + outputs = [outputs] + logger.debug( + "[Node] %r - %r -> %r (name=%r)", + op_type, ",".join(inputs), ",".join(outputs), name) + try: + common = set(inputs) & set(outputs) + except TypeError as e: + raise TypeError( + "inputs or outputs are wrong, inputs=%r, outputs=%r, node=%r." + "" % (inputs, outputs, op_type)) from e + if common: + raise RuntimeError( + "inputs and outputs cannot have " + "variables in common {} in node '{}' " + "with name '{}'.".format(common, op_type, name)) + if not isinstance(inputs, list) or not all( + isinstance(s, str) for s in inputs): + type_list = ','.join(list(str(type(s)) for s in inputs)) + raise ValueError('Inputs must be a list of string but get [%s]' + % type_list) + if (not isinstance(outputs, list) or + not all(isinstance(s, str) for s in outputs)): + type_list = ','.join(list(str(type(s)) for s in outputs)) + raise ValueError('Outputs must be a list of string but get [%s]' + % type_list) + upd = {} + dtypes = set() + for k, v in attrs.items(): + if v is None: + raise ValueError( + 'Failed to create ONNX node. Undefined ' + 'attribute pair (%s, %s) found for type %r and ' + 'version %r' % ( + k, v, op_type, op_version)) + if isinstance(v, np.ndarray): + upd[k] = v + dtypes.add(v.dtype) + + if upd: + attrs.update(upd) + if 'dtype' in attrs and op_type != 'EyeLike': + raise RuntimeError("dtype should not be a parameter.") + if len(dtypes) == 0: + dtype = None + elif len(dtypes) == 1: + dtype = list(dtypes)[0] + elif (np.float32 in dtypes and np.float64 in dtypes): + raise RuntimeError( + "Unable to select a dtype among {}.".format(dtypes)) + else: + dtype = None + try: + node = make_node(op_type, inputs, outputs, name=name, + _dtype=dtype, **attrs) + except ValueError as e: + raise ValueError("Unable to create node '{}' with name='{}'." 
+ "".format(op_type, name)) from e + node.domain = op_domain + + self.node_domain_version_pair_sets.add((op_domain, op_version)) + self.nodes.append(node) + if (self.target_opset is not None and + op_version is not None and + op_version > self.target_opset_any_domain(op_domain)): + raise RuntimeError( + "Opset number {} is higher than targeted opsets {} for " + "node type '{}' name='{}' input={} " + "output={} (domain='{}').".format( + op_version, self.target_opset_all, + node.op_type, node.name, + node.input, node.output, op_domain))
+ + def target_opset_any_domain(self, domain): + target_opset = self.target_opset_all + if isinstance(target_opset, dict): + if domain in target_opset: + to = target_opset[domain] + else: + to = None + if to is None and domain == '': + to = onnx_opset_version() + if to is None: + smap = C.schema_version_map() + if domain in smap: + to = smap[domain][1] + if to is not None: + return to + # The domain is not registered in onnx, it is probably + # a custom domain. We assume the version is one. + return 1 + return self.target_opset + + @property + def target_opset_onnx(self): + return self.target_opset_any_domain('') + + def _get_op_version(self, domain, op_type): + """ + Determines the highest version of operator + *op_type* below or equal to *target_opset*. + """ + if not hasattr(self, '_op_versions'): + self._build_op_version() + key = domain, op_type + vers = self._op_versions.get(key, None) + if vers is None: + if domain == "com.microsoft": + # avoid a not necessarily necessary warning + vers = 1 + else: + warnings.warn( + "Unable to find operator '{}' in domain '{}' in ONNX, " + "op_version is forced to 1.".format( + op_type, domain)) + vers = [1] + highest = self.target_opset_any_domain(domain) + pos = len(vers) - 1 + while pos >= 0: + if vers[pos] <= highest: + return vers[pos] + pos -= 1 + raise RuntimeError( + "Unable to find a suitable version for operator '{}' " + "in domain '{}'. Available versions: {}.".format( + op_type, domain, vers)) + + def _build_op_version(self): + res = {} + for schema in get_all_schemas_with_history(): + dom = schema.domain + name = schema.name + vers = schema.since_version + if (dom, name) not in res: + res[dom, name] = set() + res[dom, name].add(vers) + self._op_versions = {} + for k, v in res.items(): + self._op_versions[k] = list(sorted(v)) + + def _get_allowed_options(self, model): + if self.registered_models is not None: + if inspect.isfunction(model): + if model not in self.registered_models['aliases']: + return None + alias = self.registered_models['aliases'][model] + elif hasattr(model, 'alias'): + alias = model.alias + else: + if type(model) not in self.registered_models['aliases']: + return {} + alias = self.registered_models['aliases'][type(model)] + conv = self.registered_models['conv'][alias] + allowed = conv.get_allowed_options() + if allowed is None: + return {} + return allowed + clname = (str(model) if inspect.isfunction(model) + else model.__class__.__name__) + raise NotImplementedError( + "No registered models, no known allowed options " + "for model '{}'.".format(clname)) + + def validate_options(self, operator): + """ + Validates every operator allows the options + given by the user at converter time + for an operator. + """ + skl_op = operator.raw_operator + self.get_options(skl_op) + + def get_options(self, model, default_values=None, fail=True): + """ + Returns additional options for a model. + It first looks by class then by id (``id(model)``). + :param model: model being converted + :param default_values: default options (it is modified by + the function) + :param fail: fails if options not found + :return: dictionary + """ + return _build_options( + model, self.options, default_values, + self._get_allowed_options(model), fail=fail) + + def has_options(self, model, option_name): + """ + Tells if a model allows one specific options. 
+ + :param model: model being converted + :return: boolean + """ + opts = self._get_allowed_options(model) + return option_name in opts + + def ensure_topological_order(self): + """ + Ensures and modifies the order of nodes to have + a topological order (every node in the list + can only be an input for a node later in this list). + The function raises an exception if a cycle is detected. + """ + order = {} + for inp in self.inputs: + name = inp.name + order[name] = 0 + for inp in self.initializers: + name = inp.name + order[name] = 0 + + n_iter = 0 + missing_ops = [] + cont = True + while cont and n_iter < len(self.nodes) * 2: + n_iter += 1 + missing_names = set() + missing_ops = [] + cont = False + for node in self.nodes: + maxi = 0 + for name in node.input: + if name in order: + maxi = max(maxi, order[name]) + else: + maxi = None + missing_names.add(name) + break + if maxi is None: + missing_ops.append(node) + continue + key = id(node) + if key in order: + continue + cont = True + maxi += 1 + order[key] = maxi + maxi += 1 + for name in node.output: + if name in order: + raise RuntimeError( + "Unable to sort a node (cycle). An output was " + "already ordered with name %r (iteration=%r)." + "" % (name, n_iter)) + order[name] = maxi + if len(missing_names) == 0: + continue + + if len(missing_ops) > 0: + def nstr(name): + if name in order: + return "%s#%d" % (name, order[name]) + return name + rows = ["%s(%s) -> [%s]" % ( + n.name or n.op_type, + ', '.join(map(nstr, n.input)), + ', '.join(n.output)) + for n in missing_ops] + rows.insert(0, "") + rows.append("--") + rows.append("--all-nodes--") + rows.append("--") + rows.extend("%s|%s(%s) -> [%s]" % ( + n.op_type, n.name or n.op_type, + ', '.join(map(nstr, n.input)), + ', '.join(n.output)) + for n in self.nodes) + raise RuntimeError( + "After %d iterations for %d nodes, still unable " + "to sort names %r. The graph may be disconnected. " + "List of operators: %s" % ( + n_iter, len(self.nodes), missing_names, + "\n".join(rows))) + + # Update order + topo = sorted([(order[id(node)], str(id(node))) + for node in self.nodes]) + map_nodes = {str(id(node)): node for node in self.nodes} + self.nodes = [map_nodes[_[1]] for _ in topo]
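
A small sketch with arbitrary names showing that nodes added in the wrong order are re-sorted in place::

    from onnx import TensorProto
    from skl2onnx.common._container import ModelComponentContainer

    container = ModelComponentContainer(target_opset=15)
    container.add_initializer('X', TensorProto.FLOAT, [1, 2], [1.0, 2.0])
    # the consumer is added before its producer on purpose
    container.add_node('Relu', ['Y'], ['Z'], name='relu1')
    container.add_node('Identity', ['X'], ['Y'], name='identity1')
    container.ensure_topological_order()
    # container.nodes now lists 'identity1' before 'relu1'
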
+
+
+
+
+
\ No newline at end of file
diff --git a/_modules/skl2onnx/common/_topology.html b/_modules/skl2onnx/common/_topology.html
index be9017d34..196abc6f5 100644
--- a/_modules/skl2onnx/common/_topology.html
+++ b/_modules/skl2onnx/common/_topology.html
@@ -1,1800 +1,1888 @@
-skl2onnx.common._topology - sklearn-onnx 1.11.2 documentation

Source code for skl2onnx.common._topology

-# SPDX-License-Identifier: Apache-2.0
-
-
-import re
-import warnings
-import pprint
-from logging import getLogger
-from collections import OrderedDict
-import numpy as np
-from onnx import onnx_pb as onnx_proto
-from onnxconverter_common.data_types import (  # noqa
-    DataType, TensorType,
-    FloatType, Int64Type, StringType,
-    DictionaryType, FloatTensorType,  # noqa
-    Int64TensorType, SequenceType,  # noqa
-    StringTensorType, DoubleTensorType,
-    Int32TensorType, BooleanTensorType,
-    DoubleTensorType)
-try:
-    from onnxconverter_common.data_types import (
-        Int8TensorType, UInt8TensorType)
-except ImportError:
-    Int8TensorType = None
-    UInt8TensorType = None
-from ..proto import (
-    get_opset_number_from_onnx,
-    get_latest_tested_opset_version
-)
-from ..proto.onnx_helper_modified import (
-    make_graph, make_model, make_tensor_value_info
-)
-from . import _registration
-from . import utils
-from .exceptions import MissingShapeCalculator, MissingConverter
-from ._container import ModelComponentContainer, _build_options
-from .onnx_optimisation_identity import onnx_remove_node_identity
-
-type_fct = type
-
-
-def _default_OPSET_TO_IR_VERSION():
-    return {
-        1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3,
-        7: 3, 8: 4, 9: 4, 10: 5, 11: 6, 12: 7,
-        13: 7, 14: 7, 15: 8, 16: 8
-    }
-
-
-try:
-    from onnxconverter_common.topology import OPSET_TO_IR_VERSION
-    assert OPSET_TO_IR_VERSION[15] is not None
-except (ImportError, KeyError):
-    OPSET_TO_IR_VERSION = _default_OPSET_TO_IR_VERSION()
-
-OPSET_ML_TO_OPSET = {1: 11, 2: 15, 3: 16}
-
-logger = getLogger('skl2onnx')
-
-
-def get_default_opset_for_domain(domain):
-    """
-    Returns the associated opset for a domain given the main opset.
-    """
-    from .. import __max_supported_opset__ as main_opset
-    if domain == '':
-        return main_opset
-    if domain == 'ai.onnx.ml':
-        if main_opset >= 16:
-            return 3
-        if main_opset < 6:
-            return 1
-        return 2
-    if domain == 'ai.onnx.training':
-        return 1
-    return None
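
A small sketch of this helper (the domain names are examples)::

    from skl2onnx.common._topology import get_default_opset_for_domain

    get_default_opset_for_domain('')               # the main supported opset
    get_default_opset_for_domain('ai.onnx.ml')     # 1, 2 or 3 depending on the main opset
    get_default_opset_for_domain('custom.domain')  # None, unknown domain
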
-
-
-
[docs]class Variable: - """ - Defines a variable which holds any data defined - from *ONNX* types. - """ - _UNIQUE_NUMBER_ = 0 - - def __init__(self, raw_name, onnx_name, scope, type=None): - """ - :param raw_name: A string indicating the variable's name in the - original model. Usually, it's the seed string - used to created its ONNX name (i.e., the - field *onnx_name* below). - :param onnx_name: A string indicating the variable's name in - the converted model - :param scope: A string. It's the name of the scope where this - variable is declared - :param type: A type object defined in .common.data_types.py; - e.g., FloatTensorType - """ - if not isinstance(raw_name, str): - raise TypeError( - "raw_name must be a string not '%s'." % raw_name.__class__) - if type is not None and not hasattr(type, 'shape'): - raise TypeError( - "Unexpected type for variable raw_name=%r, type=%r." % ( - raw_name, type)) - if not isinstance(onnx_name, str) or '(' in onnx_name: - if onnx_name.startswith('u(') and onnx_name[-1] == ')': - onnx_name0 = onnx_name - if scope is None: - onnx_name = "UU%03dUU" % Variable._UNIQUE_NUMBER_ - Variable._UNIQUE_NUMBER_ += 1 - else: - onnx_name = scope.get_unique_variable_name("U") - logger.debug( - '[Var] rename raw_name=%r, onnx_name=%r into %r', - raw_name, onnx_name0, onnx_name) - else: - raise TypeError( - "onnx_name must be a string not %r." % onnx_name) - - if type is not None: - shape = type.shape - if shape is not None: - not_none = [v for v in shape if v is not None] - if len(not_none) and min(not_none) == 0: - raise RuntimeError( - "A variable cannot be empty, raw_name=%r, " - "onnx_name=%r, shape=%r, type=%r." % ( - raw_name, onnx_name, shape, type)) - - self._raw_name = raw_name - self._onnx_name = onnx_name - self._scope = scope - self._type = type - self._parent = None - - # The following fields are bool variables used in parsing and - # compiling stages - self._is_fed = None - self._is_root = None - self._is_leaf = None - if self.type is not None and not isinstance(self.type, DataType): - raise TypeError( - "shape must be a DataType not {}.".format(self.type)) - if isinstance(self.type, TensorType): - shape = self.type.shape - if not isinstance(shape, (list, tuple)): - try: - shape = list(shape) - except TypeError: - raise TypeError("shape must be a tuple or a list not " - "{}.".format(type_fct(shape))) - for dim in shape: - if dim is None: - continue - if not isinstance(dim, (int, np.int32, np.int64, np.intc)): - raise TypeError( - "shape must contains integers not %r (type=%r)." - "" % (dim, dim.__class__)) - logger.debug('[Var] +%s', self) - - # links to operators using those variables - self.operators_outputs_ = [] - self.operators_inputs_ = [] - self._check() - - def _check(self): - if self.type is not None and self.type.shape is not None: - for k in self.type.shape: - if k is None: - continue - if not isinstance(k, (int, np.integer)): - raise ValueError( - "Unexpected type %r for shape %r." 
- "" % (type(k), self)) - - @property - def raw_name(self): - return self._raw_name - - @property - def onnx_name(self): - return self._onnx_name - - @property - def scope(self): - return self._scope - - @property - def type(self): - return self._type - - @property - def is_fed(self): - return self._is_fed - - @property - def is_root(self): - return self._is_root - - @property - def is_leaf(self): - return self._is_leaf - - def init_status(self, is_fed=None, is_root=None, is_leaf=None): - if is_fed is not None and is_fed != self.is_fed: - logger.debug( - '[Var] update is_fed=%r for %r, parent=%r', - is_fed, self, self._parent) - self._is_fed = is_fed - if is_root is not None and is_root != self.is_root: - logger.debug('[Var] update is_root=%r for %r', is_root, self) - self._is_root = is_root - if is_leaf is not None and is_leaf != self.is_leaf: - logger.debug('[Var] update is_leaf=%r for %r', is_leaf, self) - self._is_leaf = is_leaf - - def __setattr__(self, name, value): - if name == "type": - self.set_type(value) - elif name == "onnx_name": - raise AttributeError("You must use method set_onnx_name.") - elif name in {"is_fed", "is_root", "is_leaf"}: - raise AttributeError("You must use method init_status.") - elif name in {'scope', 'raw_name'}: - raise AttributeError("scope or raw_name cannot be changed.") - self.__dict__[name] = value - - def set_type(self, new_type): - if (new_type is None or isinstance(new_type, (str, Variable)) or - not hasattr(new_type, 'shape')): - raise TypeError( - "Unexpected new type for variable %r, new_type=%r." % ( - self, new_type)) - logger.debug('[Var] update type for %r', self) - self._type = new_type - self._check() - - def set_onnx_name(self, onnx_name): - if onnx_name != self._onnx_name: - logger.debug( - '[Var] update onnx_name, from %r to %r in %r', - self.onnx_name, onnx_name, self) - if self.scope is not None and not isinstance(self.scope, str): - self.scope.rename_onnx_name(self._onnx_name, onnx_name) - self._onnx_name = onnx_name - - def set_parent(self, operator): - if self._parent is not None: - raise RuntimeError( - "This variable is already the output of operator %r. " - "It cannot be the output of %r." % (self._parent, operator)) - logger.debug( - '[Var] set parent for %r, parent=%r', self, operator) - self._parent = operator - - def get_first_dimension(self): - """ - Returns the first dimension (batch dimension) or - None if not specified (shape is empty). - """ - if (self.type is None or self.type.shape is None or - len(self.type.shape) == 0): - return None - return self.type.shape[0] - - def get_second_dimension(self): - if (self.type is None or self.type.shape is None or - len(self.type.shape) < 2): - return None - return self.type.shape[1] - - @property - def full_name(self): - """ - Return a globally unique variable ID - """ - return self.onnx_name - - def __repr__(self): - return ("Variable('{0}', '{1}', type={2})".format( - self.raw_name, self.onnx_name, self.type)) - - @staticmethod - def from_pb(obj): - """ - Creates a data type from a protobuf object. 
- """ - def get_dim(d): - r = d.dim_value - if "dim_param" in str(d): - return None - if r == 0: - # dim_value is 0 when it is 0 or undefined - return 0 if "0" in str(d) else None - return r - - def get_shape(tt): - return [get_dim(tt.shape.dim[i]) - for i in range(len(tt.shape.dim))] - - if hasattr(obj, 'extend'): - return [Variable.from_pb(o) for o in obj] - - name = obj.name - if obj.type.tensor_type: - tt = obj.type.tensor_type - elem = tt.elem_type - shape = get_shape(tt) - if elem == onnx_proto.TensorProto.FLOAT: - ty = FloatTensorType(shape) - elif elem == onnx_proto.TensorProto.BOOL: - ty = BooleanTensorType(shape) - elif elem == onnx_proto.TensorProto.DOUBLE: - ty = DoubleTensorType(shape) - elif elem == onnx_proto.TensorProto.STRING: - ty = StringTensorType(shape) - elif elem == onnx_proto.TensorProto.INT64: - ty = Int64TensorType(shape) - elif elem == onnx_proto.TensorProto.INT32: - ty = Int32TensorType(shape) - elif (UInt8TensorType is not None and - elem == onnx_proto.TensorProto.UINT8): - ty = UInt8TensorType(shape) - elif (Int8TensorType is not None and - elem == onnx_proto.TensorProto.INT8): - ty = Int8TensorType(shape) - elif elem == 0: - ty = FloatTensorType(shape) - else: - raise NotImplementedError( - "Unsupported type '{}' (elem_type={}).".format( - type(obj.type.tensor_type), elem)) - else: - raise NotImplementedError("Unsupported type '{}' as " - "a string ({}).".format( - type(obj), obj)) - - return Variable(name, name, None, ty) - - def __iter__(self): - "Enables expression such as `a,b = self`." - yield self.onnx_name - yield self.type - - def __getitem__(self, index): - if index == 0: - return self.onnx_name - if index == 1: - return self.type - raise IndexError("Unreachable element at index %d." % index) - - def add_operator(self, op, in_or_out): - "Add a link to an operator, True for output, False for input." - if in_or_out: - self.operators_outputs_.append(op) - else: - self.operators_inputs_.append(op) - - def check_compatible_type(self, other_type): - - def empty_shape(shape): - return shape is None or len(shape) == 0 - - if self.type is None: - if other_type is None: - return - elif other_type is not None: - if type(self.type) == type(other_type): - if self.type.shape == other_type.shape: - return - if empty_shape(other_type.shape): - return - raise TypeError( - "Incompatible type for variable %r and type %r." % ( - self, other_type))
- - -class VariableStr(Variable): - """ - Defines a variable a string. This should be avoided. - """ - - def __init__(self, name, scope=None, type=None): - Variable.__init__(self, name, name, scope=scope, type=type) - - @property - def raw_name(self): - return self._raw_name - - @property - def onnx_name(self): - if self._onnx_name.startswith("u("): - raise RuntimeError( - "Variable should be renamed as onnx_name=%r." - "" % self._onnx_name) - return self._onnx_name - - -
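
A minimal sketch of creating a ``Variable`` outside of any scope (names and type are arbitrary)::

    from skl2onnx.common._topology import Variable
    from skl2onnx.common.data_types import FloatTensorType

    var = Variable('X', 'X', None, FloatTensorType([None, 4]))
    var.full_name              # 'X', the globally unique ONNX name
    var.get_first_dimension()  # None, the batch dimension is left symbolic
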
[docs]class Operator: - """ - Defines an operator available in *ONNX*. - """ - class OperatorList(list): - def __init__(self, parent, kind): - super(Operator.OperatorList, self).__init__() - self.parent = parent - self.kind = kind - - def __eq__(self, second): - raise NotImplementedError( - "Operator equal not implemented and not needed.") - - def append(self, v): - if not isinstance(v, Variable): - raise TypeError( - "Input and output must be of type Variable not %r." - "" % type(v)) - if self.kind == 'Out': - v.set_parent(self.parent) - super(Operator.OperatorList, self).append(v) - logger.debug("[Op] add %s %r to %r", self.kind, v, self.parent) - if self.kind == 'In': - v.add_operator(self.parent, False) - elif self.kind == "Out": - v.add_operator(self.parent, True) - else: - raise RuntimeError( - "Unexpected value for kind=%r." % self.kind) - - def extend(self, vs): - for v in vs: - self.append(v) - - def __getitem__(self, i): - v = list.__getitem__(self, i) - if isinstance(i, int) and not isinstance(v, Variable): - raise TypeError("Element %d must be a Variable not %r." % ( - i, type(v))) - return v - - def __setitem__(self, i, v): - raise LookupError( - "Setter should not be used to modify an element.") - - def set_element(self, i, v): - "Updates element i." - if not isinstance(v, Variable): - raise TypeError( - "Value v must be a Variable not %r." % type(v)) - logger.debug( - "[Op] %s-change element %d from %r to %r in %r", - self.kind, i, self[i], v, self.parent) - list.__setitem__(self, i, v) - - def to_string(self): - names = [] - for o in self: - if hasattr(o, 'onnx_name'): - names.append(o.onnx_name) - else: - names.append('"%s"' % str(o)) - return ",".join(names) - - def __init__(self, onnx_name, scope, type, raw_operator, - target_opset, scope_inst): - """ - :param onnx_name: A unique ID, which is a string - :param scope: The name of the scope where this operator is - declared. It's a string. - :param type: A object which uniquely characterizes the type of - this operator. For example, it can be a string, - pooling, if this operator is associated with a - CoreML pooling layer. - :param raw_operator: The original operator which defines this operator; - for example, a scikit-learn Imputer and - a CoreML Normalizer. - :param target_opset: The target opset number for the converted model. - :param scope_inst: :class:`Scope` instance the operator belongs to - """ - if isinstance(raw_operator, str): - raise RuntimeError("Parameter raw_operator must be an object not " - "a string '{0}'.".format(raw_operator)) - # operator name in the converted model, if raw_operator - # is not None, output_shapes can be guessed - # from the raw model. Otherwise, it can be guessed - # from the input shapes. - self.onnx_name = onnx_name - self.scope = scope - self.type = type - self.raw_operator = raw_operator - self.inputs = Operator.OperatorList(self, 'In') - self.outputs = Operator.OperatorList(self, 'Out') - self._is_evaluated = None - self.target_opset = target_opset - self.scope_inst = scope_inst - logger.debug('[Op] +%r', self) - - def new_raw_operator(self, raw_operator, alias): - """ - Returns a shallow copy of this operator, - changes the raw_operator but keeps the same inputs - and outputs. 
- """ - op = Operator(self.onnx_name, self.scope, alias, raw_operator, - self.target_opset, self.scope_inst) - op.inputs = self.inputs - op.outputs = self.outputs - return op - - def __repr__(self): - try: - textop = repr(self.raw_operator) - except AttributeError: - textop = "MISSING OP" - except KeyError: - # The line above fails for python 3.7 - textop = type(self.raw_operator) - if isinstance(textop, str) and "\n" in textop: - textop = textop.replace('\n', '').replace(' ', '') - return ("Operator(type='{0}', onnx_name='{1}', inputs='{2}', " - "outputs='{3}', raw_operator={4})".format( - self.type, self.onnx_name, - self.inputs.to_string(), - self.outputs.to_string(), - textop)) - - def __setattr__(self, name, value): - if name in ('inputs', 'outputs'): - if (isinstance(value, list) and - not isinstance(value, Operator.OperatorList)): - if name == 'inputs': - self.inputs = Operator.OperatorList(self, 'In') - self.inputs.extend(value) - return - if name == 'outputs': - self.outputs = Operator.OperatorList(self, 'Out') - self.outputs.extend(value) - return - if not isinstance(value, Operator.OperatorList): - raise TypeError( - "inputs or outputs must be of type Operator.OperatorList.") - ioo = name == 'outputs' - for v in value: - v.add_operator(self, ioo) - self.__dict__[name] = value - - @property - def is_evaluated(self): - return self._is_evaluated - - def init_status(self, is_evaluated=None): - if is_evaluated is not None and is_evaluated != self.is_evaluated: - logger.debug( - '[Op] update is_evaluated=%r for %r', - is_evaluated, self) - self._is_evaluated = is_evaluated - - @property - def full_name(self): - """ - Return a globally unique operator ID - """ - return self.onnx_name - - @property - def input_full_names(self): - """ - Return all input variables' names - """ - return [variable.full_name for variable in self.inputs] - - @property - def output_full_names(self): - """ - Return all output variables' names - """ - return [variable.full_name for variable in self.outputs] - - @property - def original_operator(self): - """ - Return the original operator/layer - """ - return self.raw_operator - - def infer_types(self): - # Invoke a core inference function - if self.type is None: - raise MissingShapeCalculator( - "Unable to find a shape calculator for type '{}'.".format( - type(self.raw_operator))) - try: - shape_calc = _registration.get_shape_calculator(self.type) - except ValueError: - raise MissingShapeCalculator( - "Unable to find a shape calculator for alias '{}' " - "and type '{}'.".format(self.type, type(self.raw_operator))) - if shape_calc is None: - raise MissingShapeCalculator( - "Unexpected shape calculator for alias '{}' " - "and type '{}'.".format(self.type, type(self.raw_operator))) - logger.debug( - "[Shape-a] %r fed %r - %r", self, - "".join(str(i.is_fed) for i in self.inputs), - "".join(str(i.is_fed) for i in self.outputs)) - shape_calc(self) - logger.debug( - "[Shape-b] %r inputs=%r - outputs=%r", - self, self.inputs, self.outputs)
- - -
[docs]class Scope: - """ - Every node of an *ONNX* graph must be unique. This class holds the list - of existing name for every node already defined in graph. It also - provides functions to create a unique unused name. - """ - - def __init__(self, name, target_opset=None, - custom_shape_calculators=None, options=None, - registered_models=None, naming=None): - """ - :param name: A string, the unique ID of this scope in a - Topology object - :param target_opset: The target opset number for the converted - model. - :param custom_conversion_functions: a dictionary for specifying - the user customized conversion function - :param custom_shape_calculators: a dictionary for specifying - the user customized shape calculator - :param options: see :ref:`l-conv-options` - :param naming: the user may want to change the way intermediate - are named, this parameter can be a string (a prefix) or a - function, which signature is the following: - `get_name(name, existing_names)`, the library will then - check this name is unique and modify it if not - :param registered_models: registered models - - .. versionchanged:: 1.10.0 - Parameter *naming* was added. - """ - self.name = name - self.onnx_variable_names = set() - self.onnx_operator_names = set() - self.target_opset = target_opset - self.custom_shape_calculators = custom_shape_calculators - - # An one-to-many map from raw variable name to ONNX variable - # names. It looks like - # (key, value) = (raw_name, [onnx_name, onnx_name1, onnx_name2, ..., onnx_nameN]) # noqa - # The last name may hide all other names in this scope. - self.variable_name_mapping = {} - - # A map of local variables defined in this scope. - # (key, value) = (onnx_name, variable) - self.variables = OrderedDict() - self.input_variables = [] - self.output_variables = [] - - # A map of local operators defined in this scope. - # (key, value) = (onnx_name, operator) - self.operators = {} - - # Additional options given to converters. - self.options = options - - # Registered models - self.registered_models = registered_models - self.naming = naming - - if naming is None: - self._naming = Topology._generate_unique_name - elif isinstance(naming, str): - self._naming = ( - lambda seed, names: Topology._generate_unique_name( - self.naming + seed, names)) - elif callable(self.naming): - self._naming = ( - lambda seed, names: Topology._generate_unique_name( - self.naming(seed, names), names)) - else: - raise TypeError( - "Unexpected type for parameter naming: %r." % type(naming)) - - def get(self, var_name, default_value): - "Returns variable with 'name' or default value is not found." - return self.variables.get(var_name, default_value) - - def has_variable_name(self, name): - """ - Tells if a variable is already registered. - """ - return name in self.onnx_variable_names - - def get_shape_calculator(self, model_type): - """ - Returns the shape calculator for the given model type. - - :param model_type: model type such as *LogisticRegression* - :return: alias or None if not found - """ - return self.custom_shape_calculators.get(model_type, None) - -
[docs] def get_unique_variable_name(self, seed, rename=True): - """ - Creates a unique variable ID based on the given seed. - """ - if not isinstance(seed, str): - raise TypeError("Parameter seed must be a string not {}." - "".format(type(seed))) - if rename: - name = self._naming(seed, self.onnx_variable_names) - else: - name = Topology._generate_unique_name( - seed, self.onnx_variable_names) - return name
- -
[docs] def get_unique_operator_name(self, seed): - """ - Creates a unique operator ID based on the given seed. - """ - return self._naming(seed, self.onnx_operator_names)
- - def declare_local_variable(self, raw_name, type=None, prepend=False, - missing_type=False, rename=True): - """ - This function may create a new variable in this scope. If - *raw_name* has been used to create other variables, the new - variable will hide all other variables created using *raw_name*. - """ - if type is None and not missing_type: - raise RuntimeError( - "Unknown type for %r (type=%r)." % (raw_name, type)) - # Get unique ID for the new variable - onnx_name = self.get_unique_variable_name(raw_name, rename=rename) - - # Create the variable - variable = Variable(raw_name, onnx_name, self.name, type) - self.register_variable(variable, prepend=prepend) - return variable - - def register_variable(self, var, prepend=False): - "Adds a variable to the scope." - if var.onnx_name in self.variables: - raise RuntimeError( - "Variable %r already registered (other=%r)." % ( - var, self.variables[var.onnx_name])) - - if var.raw_name in self.variable_name_mapping: - # Hide existing variables with the same raw_name - if not prepend: - self.variable_name_mapping[var.raw_name].append(var.onnx_name) - else: - self.variable_name_mapping[var.raw_name].insert( - 0, var.onnx_name) - else: - self.variable_name_mapping[var.raw_name] = [var.onnx_name] - - self.variables[var.onnx_name] = var - - def declare_existing_subgraph_name(self, graph_proto): - """ - Declare all name from a subgraph in order to avoid being picked twice. - """ - output_name = {o.name for o in graph_proto.output} - for node in graph_proto.node: - for name in node.output: - if name in output_name: - continue - if self.has_variable_name(name): - raise NameError( - "Result name %r is already taken (outputs=%r) " - "(node=%r)." % ( - name, output_name, node)) - self.onnx_variable_names.add(name) - if node.name in self.onnx_operator_names: - raise NameError( - "Operator name %r is already taken " - "(node=%r)." % ( - node.name, node)) - self.onnx_operator_names.add(node.name) - - def rename_onnx_name(self, old_name, new_name): - if new_name in self.variables: - raise RuntimeError( - "Name %r already in variables (%r)." % ( - new_name, self.variables[new_name])) - if old_name not in self.variables: - raise RuntimeError( - "Unable to find name %r in variables." % old_name) - logger.debug( - '[Scope] update onnx_name, from %r to %r', - old_name, new_name) - self.variables[new_name] = self.variables[old_name] - del self.variables[old_name] - - def declare_local_input(self, raw_name, type=None, prepend=False, - rename=True): - """ - Calls `declare_local_variable`. Registers this variable - as an input. - """ - var = self.declare_local_variable( - raw_name, type=type, prepend=prepend, rename=rename) - self.input_variables.append(var) - return var - - def declare_local_output(self, raw_name, type=None, prepend=False, - missing_type=False): - """ - Calls `declare_local_variable`. Registers this variable - as an output. - """ - var = self.declare_local_variable( - raw_name, type=type, prepend=prepend, - missing_type=missing_type) - self.output_variables.append(var) - return var - - def declare_local_operator(self, type, raw_model=None): - """ - This function is used to declare new local operator. 
- """ - onnx_name = self.get_unique_operator_name(str(type)) - operator = Operator(onnx_name, self.name, type, raw_model, - self.target_opset, scope_inst=self) - self.operators[onnx_name] = operator - return operator - - def _get_allowed_options(self, model, fail=True): - if self.registered_models is not None: - if type(model) not in self.registered_models['aliases']: - if fail: - raise NotImplementedError( - "No registered models, no known allowed options " - "for model '{}'.".format(model.__class__.__name__)) - return {} - alias = self.registered_models['aliases'][type(model)] - conv = self.registered_models['conv'][alias] - allowed = conv.get_allowed_options() - return allowed - raise NotImplementedError( - "No registered models, no known allowed options " - "for model '{}'.".format(model.__class__.__name__)) - - def add_options(self, model_id, options): - """ - Adds an option, for example, - ``add_options(id(clr), {'raw_scores': True})`` - tells the converter associated to ``clr`` to - use raw score instead of probabilities. - - :param model_id: class or ``id(instance)`` - :param options: dictionary with the new values - """ - if options is None: - return - if self.options is None: - self.options = {} - if model_id not in self.options: - self.options[model_id] = None - if self.options[model_id] is None: - self.options[model_id] = {} - self.options[model_id].update(options) - - def get_options(self, model, default_values=None, fail=True): - """ - Returns additional options for a model. - It first looks by class then by id (``id(model)``). - :param model: model being converted - :param default_values: default options (it is modified by - the function) - :param fail: fails if option it not found - :return: dictionary - """ - return _build_options( - model, self.options, default_values, - self._get_allowed_options(model, fail=fail), - fail=fail) - - def replace_raw_operator(self, op1, op2, alias): - """ - Replaces every raw operator op1 by op2. - The function uses `id()` to detect op1. - """ - for v in self.operators.values(): - if id(v.raw_operator) == id(op1): - logger.debug( - '[Scope] replace %d by %d in %r.', - id(v.raw_operator), id(op1), v) - v.raw_operator = op2 - v.type = alias
- - -
[docs]class Topology: - """ - Holds instances on :class:`Scope <skl2onnx.common._topology.Scope>` and - :class:`SklearnModelContainer - <skl2onnx.common._container.SklearnModelContainer>`. - These are filled by the converters while a pipeline is being converted. - """ - - def __init__(self, model, default_batch_size=1, initial_types=None, - target_opset=None, custom_conversion_functions=None, - custom_shape_calculators=None, registered_models=None): - """ - Initializes a *Topology* object, which is an intermediate - representation of a computational graph. - - :param model: RawModelContainer object or one of its derived - classes. It contains the original model. - :param default_batch_size: batch_size prepend to scalar and - array types from CoreML. It's usually - 1 or None. - :param initial_types: A list providing some types for some - root variables. - Each element is a tuple of a variable name and a type defined - in *data_types.py*. - :param custom_conversion_functions: a dictionary for specifying - the user customized conversion function - :param custom_shape_calculators: a dictionary for specifying the - user customized shape calculator - :param registered_models: registered models - """ - self.scopes = [] - self.raw_model = model - self.scope_names = set() - self.initial_types = initial_types if initial_types else list() - self.default_batch_size = default_batch_size - self.target_opset = target_opset - self.custom_conversion_functions = ( - custom_conversion_functions if custom_conversion_functions else {}) - self.custom_shape_calculators = ( - custom_shape_calculators if custom_shape_calculators else {}) - - for k in self.custom_conversion_functions: - if not callable(k): - raise TypeError("Keys in custom_conversion_functions must be " - "types not strings.") - for k in self.custom_shape_calculators: - if not callable(k): - raise TypeError("Keys in custom_shape_calculators must be " - "types not strings.") - - # A map of local overwritten model aliases. - self.model_aliases = {} - all_model_types = (set(self.custom_conversion_functions) - | set(self.custom_shape_calculators)) - for mtype in all_model_types: - alias = "{}_{}".format(mtype.__name__, id(self)) - self.model_aliases[mtype] = alias - - # Registered models - if registered_models is None: - raise AssertionError() - self.registered_models = registered_models - - @property - def scope(self): - if len(self.scopes) != 1: - raise RuntimeError( - "Only one scope is allowed not %d." % len(self.scopes)) - return self.scopes[0] - - @staticmethod - def _generate_unique_name(seed, existing_names): - """ - Produce an unique string based on the seed - :param seed: a string - :param existing_names: a set containing strings which cannot be - produced - :return: a string similar to the seed - """ - if seed == '': - raise ValueError('Name seed must be a non-empty string.') - - # Make the seed meet C-style naming convention - # Only alphabets and numbers are allowed - seed = re.sub('[^\\w+]', '_', seed) - # The first symbol cannot be a number - if re.match('^[0-9]', seed): - seed = '_' + seed - - # If seed has never been seen, we return it as it is. Otherwise, - # we will append an number to make it unique. 
- if seed not in existing_names: - existing_names.add(seed) - return seed - else: - i = 1 - while seed + str(i) in existing_names: - i += 1 - new_name = seed + str(i) - existing_names.add(new_name) - return new_name - - def get_unique_scope_name(self, seed): - return Topology._generate_unique_name(seed, self.scope_names) - - def declare_scope(self, seed, parent_scopes=None, options=None, - naming=None): - """ - Creates a new :class:`Scope <skl2onnx.common._topology.Scope>` - and appends it to the list of existing scopes. - """ - if len(self.scopes) != 0: - raise RuntimeError( - "Only one scope can be created.") - scope = Scope( - self.get_unique_scope_name(seed), target_opset=self.target_opset, - custom_shape_calculators=self.custom_shape_calculators, - options=options, registered_models=self.registered_models, - naming=naming) - - # Declare input variables. - # They should be the inputs of the scikit-learn - # model you want to convert into ONNX. - for var_name, initial_type in self.initial_types: - scope.declare_local_input(var_name, initial_type, rename=False) - self.scopes.append(scope) - return scope - - def unordered_operator_iterator(self): - for scope in self.scopes: - for operator in scope.operators.values(): - yield operator - - def unordered_variable_iterator(self): - for scope in self.scopes: - for variable in scope.variables.values(): - yield variable - - def call_converter(self, operator, container, verbose=0): - "Calls converter for operator *operator*." - mtype = type(operator.raw_operator) - if mtype in self.custom_conversion_functions: - conv = self.custom_conversion_functions[mtype] - elif operator.type in self.custom_conversion_functions: - conv = self.custom_conversion_functions[operator.type] - elif hasattr(operator.raw_operator, "onnx_converter"): - conv = operator.raw_operator.onnx_converter() - else: - # Convert the selected operator into some ONNX objects and - # save them into the container - try: - conv = _registration.get_converter(operator.type) - except ValueError: - raise MissingConverter( - "Unable to find converter for alias '{}' type " - "'{}'. You may raise an issue at " - "https://github.com/onnx/sklearn-onnx/issues." - "".format(operator.type, - type(getattr(operator, 'raw_model', None)))) - - container.validate_options(operator) - if verbose > 0: - print("[call_converter] call converter for %r." % operator.type) - logger.debug( - "[Conv] call %r fed %r - %r", operator, - "".join(str(i.is_fed) for i in operator.inputs), - "".join(str(i.is_fed) for i in operator.outputs)) - conv(self.scopes[0], operator, container) - logger.debug("[Conv] end - %r", operator) - - def call_shape_calculator(self, operator): - "Calls shape_calculator for operator *operator*." - mtype = type(operator.raw_operator) - if mtype in self.custom_shape_calculators: - # overwritten operator. 
- source = 'custom' - shape_calc = self.custom_shape_calculators[mtype] - elif operator.type in self.custom_shape_calculators: - source = 'custom' - shape_calc = self.custom_shape_calculators[operator.type] - elif hasattr(operator.raw_operator, "onnx_shape_calculator"): - source = 'onnx_shape_calculator' - shape_calc = operator.raw_operator.onnx_shape_calculator() - else: - source = "" - shape_calc = None - - if shape_calc is not None: - logger.debug( - "[Shape1] %r fed %r - %r (source=%r)", operator, - ",".join(str(i.is_fed) for i in operator.inputs), - ",".join(str(i.is_fed) for i in operator.outputs), - source) - shape_calc(operator) - else: - logger.debug('[Shape2] call infer_types for %r', operator) - operator.infer_types() - - def _initialize_graph_status_for_traversing(self): - """ - Initialize the status of all variables and operators before - traversing the graph. Only used by convert_operators. - """ - if len(self.scopes) != 1: - raise RuntimeError( - "Only one scope is allowed not %d." % len(self.scopes)) - input_names = set(v.onnx_name for v in self.scopes[0].input_variables) - if len(input_names) == 0: - raise RuntimeError("No detected inputs.") - for variable in self.unordered_variable_iterator(): - is_input = variable.onnx_name in input_names - variable.init_status(is_fed=is_input) - - for operator in self.unordered_operator_iterator(): - operator.init_status(is_evaluated=False) - - def _propagate_status(self, operator, container, fed_variables, - verbose=0): - """ - Propagates status *is_fed* based on output variable - and node added in the container. - """ - if verbose > 1: - print("[_propagate_status] after op=%r" % operator) - vars = {} - for node in container.nodes: - for i in node.input: - if i not in vars: - vars[i] = [] - vars[i].append(node) - - if verbose > 1: - print("[_propagate_status] newly fed=%r" % list( - v.onnx_name for v in operator.outputs if v.is_fed)) - stack = list(fed_variables) - scope = self.scopes[0] - while len(stack) > 0: - nodes = {} - for name in stack: - if name not in vars: - continue - for n in vars[name]: - nodes[id(n)] = n - stack = [] - for node in nodes.values(): - if all(fed_variables.get(n, False) for n in node.input): - for o in node.output: - if o not in fed_variables: - if verbose > 1: - print("[_propagate_status] add=%r" % o) - fed_variables[o] = o - stack.append(o) - if o in scope.variables: - var = scope.variables[o] - var.init_status(is_fed=True) - if verbose > 1: - print("[_propagate_status] fed=%r" % var) - - def convert_operators(self, container=None, verbose=0): - """ - Calls all converters and shape_calculator for existing - operators. It also processes new operators created by - converters. - """ - def _check_operator_(operator): - if not isinstance(operator.inputs, Operator.OperatorList): - raise TypeError( - "operator.inputs must be a Operator.OperatorList " - "not %r." % type(operator.inputs)) - if not isinstance(operator.outputs, Operator.OperatorList): - raise TypeError( - "operator.outputs must be a Operator.OperatorList " - "not %r." % type(operator.outputs)) - if any(not isinstance(i, Variable) for i in operator.inputs): - raise TypeError( - "One input is not a Variable for operator %r - %r." - "" % (type(operator.raw_operator), operator)) - if any(not isinstance(i, Variable) for i in operator.outputs): - raise TypeError( - "One output is not a Variable for operator %r - %r." 
- "" % (type(operator.raw_operator), operator)) - - def _check_variable_in_(variable, operator): - idop = id(operator) - ids = set(id(op) for op in variable.operators_inputs_) - if idop not in ids: - raise RuntimeError( - "Operator %r not registered in the list of operators " - "of %r taking it as an input [\n%s]." % ( - operator, variable, - "\n".join(map(str, variable.operators_inputs_)))) - - def _check_variable_out_(variable, operator): - if variable.is_fed: - add = ["", "--DEBUG-INFO--"] - for scope in self.scopes: - add.append('---') - add.append(pprint.pformat( - scope.variable_name_mapping)) - add.append('---') - for var in scope.variables.values(): - add.append(" is_fed=%s %s - n_in=%d n_out=%d" % ( - getattr(var, 'is_fed', '?'), var, - len(var.operators_inputs_), - len(var.operators_outputs_))) - add.append('---') - for op in scope.operators.values(): - add.append(" is_evaluated=%s %s" % ( - getattr(op, 'is_evaluated', '?'), op)) - add.append('---') - for v in operator.inputs: - add.append(" inputs={}".format(v)) - for v in operator.outputs: - add.append(" outputs={}".format(v)) - add.append('--- operator producing this variable--') - for op in variable.operators_outputs_: - add.append(str(op)) - raise RuntimeError( - "A variable is already assigned ({}) " - "for operator '{}' (name='{}'). " - "operator.is_evaluated={}, inputs.is_fed={}, " - "outputs.is_fed={}. " - "This may still happen if a converter is a " - "combination of sub-estimators and one " - "of them is producing this output. " - "In that case, an identity node must be " - "added.{}".format( - variable, operator.type, - operator.onnx_name, operator.is_evaluated, - [v.is_fed for v in operator.inputs], - [v.is_fed for v in operator.outputs], - "\n".join(add))) - - if verbose > 0: - print("[convert_operators] begin") - self._initialize_graph_status_for_traversing() - fed_variables = {i.name: i for i in container.initializers} - changes = 1 - n_iter = 0 - while changes > 0: - n_iter += 1 - changes = 0 - ops = list(self.unordered_operator_iterator()) - if verbose > 0: - print("[convert_operators] iteration %d - n_vars=%d " - "n_ops=%d" % ( - n_iter, len(fed_variables), len(ops))) - for operator in ops: - _check_operator_(operator) - for var in operator.inputs: - if var.is_fed: - fed_variables[var.onnx_name] = var - if (all(variable.is_fed for variable in operator.inputs) and - not operator.is_evaluated): - - for variable in operator.inputs: - _check_variable_in_(variable, operator) - for variable in operator.outputs: - _check_variable_out_(variable, operator) - - self.call_shape_calculator(operator) - self.call_converter(operator, container, verbose=verbose) - - # If an operator contains a sequence of operators, - # output variables are not necessarily known at this stage. 
- operator.init_status(is_evaluated=True) - for variable in operator.outputs: - if all(op.is_evaluated - for op in variable.operators_outputs_): - variable.init_status(is_fed=True) - fed_variables[variable.onnx_name] = variable - fed_variables.update( - {i.name: i for i in container.initializers - if i.name not in fed_variables}) - self._propagate_status(operator, container, fed_variables, - verbose=verbose) - - # unfed some variables (it happens when a node - # shares an output with another node) - rem = [] - for n, var in fed_variables.items(): - if not hasattr(var, 'operators_outputs_'): - # initializer - continue - if any(not o.is_evaluated - for o in var.operators_outputs_): - rem.append(n) - for r in rem: - v = fed_variables[r] - v.init_status(is_fed=False) - del fed_variables[v.onnx_name] - changes += 1 - - if verbose > 0: - print("[convert_operators] end iter: %d - n_vars=%d" % ( - n_iter, len(fed_variables))) - if verbose > 0: - print("[convert_operators] end.") - - # Last verification. - not_evaluated = [] - for op in self.unordered_operator_iterator(): - if not op.is_evaluated: - not_evaluated.append(op) - if len(not_evaluated) > 0: - rows = ["---VARS---"] - for var in self.unordered_variable_iterator(): - rows.append( - "is_fed=%r is_leaf=%r is_root=%r - %r - n_in=%d n_out=%d" - "" % (var.is_fed, var.is_leaf, var.is_root, var, - len(var.operators_inputs_), - len(var.operators_outputs_))) - rows.append("---OPERATORS---") - for op in self.unordered_operator_iterator(): - rows.append("is_eval=%r - %r" % (op.is_evaluated, op)) - rows.append("---NODES---") - for node in container.nodes: - rows.append("%s: %r -> %r" % ( - node.op_type, node.input, node.output)) - raise RuntimeError( - "Not all operators have been evaluated. A variable name " - "is probably misspelled.\n%s" - "" % "\n".join(rows)) - - # Input and output - if len(self.scopes[0].input_variables) > 0: - inputs = self.scopes[0].input_variables - else: - inputs = [v for v in self.unordered_variable_iterator() - if v.is_root] - for i in inputs: - container.add_input(i) - outputs = [v for v in self.unordered_variable_iterator() - if v.is_leaf] - - # The function checks that for output variable, - # raw_name equal onnx_name. It swaps names if it is not the case. - to_swap = [] - for out in outputs: - if out.raw_name != out.onnx_name: - to_swap.append(out) - if len(to_swap) != 0: - swaped = set() - for var in to_swap: - if var.raw_name in swaped: - continue - swaped.add(var.raw_name) - if verbose > 1: - print("[convert_operators] %r <-> %r." % ( - var.raw_name, var.onnx_name)) - old_name = var.onnx_name - new_name = var.raw_name - - try: - container.swap_names(old_name, new_name) - except NotImplementedError as e: - logger.debug( - '[Topo] unable to swap %r and %r (%r).', - old_name, new_name, e) - continue - - for v in self.unordered_variable_iterator(): - if v.onnx_name == old_name: - v.set_onnx_name(new_name) - elif v.onnx_name == new_name: - v.set_onnx_name(old_name) - - for o in outputs: - container.add_output(o)
- - -
[docs]def convert_topology(topology, model_name, doc_string, target_opset, - channel_first_inputs=None, - options=None, remove_identity=True, - verbose=0): - """ - This function is used to convert our Topology object defined in - _parser.py into a ONNX model (type: ModelProto). - - :param topology: The Topology object we are going to convert - :param model_name: GraphProto's name. Let "model" denote the - returned model. The string "model_name" would be - assigned to "model.graph.name." - :param doc_string: A string attached to the produced model - :param target_opset: number or dictionary, - for example, 7 for ONNX 1.2, and 8 for ONNX 1.3, - a dictionary is used to indicate different opset for - different domains - :param options: see :ref:`l-conv-options` - :param remove_identity: removes identity nodes - include '1.1.2', '1.2', and so on. - :param verbose: displays information while converting - :return: a ONNX ModelProto - """ - if target_opset is None: - target_opset = get_latest_tested_opset_version() - if isinstance(target_opset, dict): - onnx_target_opset = target_opset.get( - '', get_latest_tested_opset_version()) - else: - onnx_target_opset = target_opset - if onnx_target_opset > get_opset_number_from_onnx(): - found = get_opset_number_from_onnx() - raise RuntimeError( - "Parameter target_opset {} > {} is higher than the " - "version of the installed onnx package. See " - "https://github.com/onnx/onnx/blob/master/docs/" - "Versioning.md#released-versions" - ".".format(onnx_target_opset, found)) - if onnx_target_opset > get_latest_tested_opset_version(): - warnings.warn( - "Parameter target_opset {} > {} is higher than the " - "the latest tested version" - ".".format( - onnx_target_opset, - get_latest_tested_opset_version())) - - container = ModelComponentContainer( - target_opset, options=options, - registered_models=topology.registered_models, - white_op=topology.raw_model._white_op, - black_op=topology.raw_model._black_op, - verbose=verbose) - - # Traverse the graph from roots to leaves - # This loop could eventually be parallelized. - topology.convert_operators(container=container, verbose=verbose) - container.ensure_topological_order() - - if len(container.inputs) == 0: - raise RuntimeError("No detected inputs after conversion.") - if len(container.outputs) == 0: - raise RuntimeError("No detected outputs after conversion.") - if verbose >= 2: - print("---NODES---") - for node in container.nodes: - print(" %s - %s: %r -> %r" % ( - node.op_type, node.name, node.input, node.output)) - - # Create a graph from its main components - if container.target_opset_onnx < 9: - # When calling ModelComponentContainer's add_initializer(...), - # nothing is added into the input list. However, for ONNX target - # opset < 9, initializers should also be a part of model's - # (GraphProto) inputs. Thus, we create ValueInfoProto objects - # from initializers (type: TensorProto) directly and then add - # them into model's input list. - extra_inputs = [] # ValueInfoProto list of the initializers - for tensor in container.initializers: - # Sometimes (especially when creating optional input values - # such as RNN's initial hidden state), an initializer is also - # one of the original model's input, so it has been added into - # the container's input list. If this is the case, we need to - # skip one iteration to avoid duplicated inputs. 
- if tensor.name in [value_info.name for value_info in - container.inputs]: - continue - - # Initializers are always tensors so we can just call - # make_tensor_value_info(...). - value_info = make_tensor_value_info( - tensor.name, tensor.data_type, tensor.dims) - extra_inputs.append(value_info) - - # Before ONNX opset 9, initializers were needed to be passed in - # with inputs. - graph = make_graph(container.nodes, model_name, - container.inputs + extra_inputs, - container.outputs, container.initializers) - else: - # In ONNX opset 9 and above, initializers are included as - # operator inputs and therefore do not need to be passed as - # extra_inputs. - graph = make_graph( - container.nodes, model_name, container.inputs, - container.outputs, container.initializers) - - # Add extra information related to the graph - graph.value_info.extend(container.value_info) - - # Create model - onnx_model = make_model(graph) - - # Update domain version - opv = min(onnx_target_opset, - _get_main_opset_version(onnx_model) or onnx_target_opset) - if not _update_domain_version(container, onnx_model, verbose=verbose): - # Main opset was not added. Doing it here. - op_set = onnx_model.opset_import.add() - op_set.domain = '' - op_set.version = opv - if verbose > 0: - print('[convert_topology] +opset: name=%r, version=%s' % ( - '', opv)) - - # Add extra information - irv = OPSET_TO_IR_VERSION.get(opv, onnx_proto.IR_VERSION) - onnx_model.ir_version = irv - onnx_model.producer_name = utils.get_producer() - onnx_model.producer_version = utils.get_producer_version() - onnx_model.domain = utils.get_domain() - onnx_model.model_version = utils.get_model_version() - onnx_model.doc_string = doc_string - - # Removes many identity nodes, - # the converter may introduct identity nodes - # after a zipmap operator and onnx <= 1.7 does not - # support that. It does not use onnxconverter-common - # as the optimizer only support opset >= 9. - if remove_identity: - onnx_model = onnx_remove_node_identity(onnx_model) - - return onnx_model
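The convert_topology function above is normally not called directly. A hedged sketch of the usual route through the public to_onnx helper, with target_opset given as a per-domain dictionary as described in the docstring (the estimator and opset values below are only illustrative):

    import numpy as np
    from sklearn.cluster import KMeans
    from skl2onnx import to_onnx

    # fit a small model; conversion walks the Topology built from it
    X = np.random.rand(10, 3).astype(np.float32)
    model = KMeans(n_clusters=2, n_init=3).fit(X)

    # target_opset may be a single number or a dictionary keyed by domain
    onx = to_onnx(model, X, target_opset={'': 15, 'ai.onnx.ml': 2})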
- - -def _update_domain_version(container, onnx_model, verbose=0): - # Merge operator sets for the same domain, the largest version - # number would be kept - purified_operator_set = dict() - for op_domain, op_version in container.node_domain_version_pair_sets: - if op_domain not in purified_operator_set: - purified_operator_set[op_domain] = op_version - else: - purified_operator_set[op_domain] = max( - purified_operator_set[op_domain], op_version) - - # Fill operator sets - i = 0 - for op_domain, op_version in purified_operator_set.items(): - if op_version is None: - continue - if i == 0 and len(onnx_model.opset_import) == 1: - # Overwrite the default operator set created by - # make_model(...) - op_set = onnx_model.opset_import[0] - else: - # Just create one ONNX element in opset_import - op_set = onnx_model.opset_import.add() - if verbose > 0: - print('[_update_domain_version] +opset %d: name=%r, version=%s' % ( - i, op_domain, op_version)) - op_set.domain = op_domain - if op_set != '': - max_supported = get_default_opset_for_domain(op_domain) - if max_supported is not None and max_supported < op_version: - raise RuntimeError( - "The model is using version %d of domain %r not supported " - "yet by this library. You need to specify " - "target_opset={%r: %r}." % ( - op_version, op_domain, op_domain, max_supported)) - op_set.version = op_version - - i += 1 - if container.target_opset_any_domain(op_domain) < op_version: - raise RuntimeError( - 'The specified opset %d is too low to convert ' - 'this model, which requires at least opset ' - '%d.' % ( - container.target_opset_any_domain(op_domain), - op_version)) - return '' in purified_operator_set - - -def _get_main_opset_version(model): - """ - Returns the main opset version. - """ - mld = None - for op in model.opset_import: - if op.domain == '': - return op.version - if op.domain == "ai.onnx.ml": - mld = op.version - if mld is not None: - return OPSET_ML_TO_OPSET.get(mld, None) - return None -
+ skl2onnx.common._topology - sklearn-onnx 1.14.0 documentation

Source code for skl2onnx.common._topology

+# SPDX-License-Identifier: Apache-2.0
+
+
+import re
+import warnings
+import pprint
+from logging import getLogger
+from collections import OrderedDict
+import numpy as np
+from onnx import onnx_pb as onnx_proto
+from onnxconverter_common.data_types import (  # noqa
+    DataType, TensorType,
+    FloatType, Int64Type, StringType,
+    DictionaryType, FloatTensorType,  # noqa
+    Int64TensorType, SequenceType,  # noqa
+    StringTensorType, DoubleTensorType,
+    Int32TensorType, BooleanTensorType,
+    DoubleTensorType)
+try:
+    from onnxconverter_common.data_types import (
+        Int8TensorType, UInt8TensorType)
+except ImportError:
+    Int8TensorType = None
+    UInt8TensorType = None
+from ..proto import (
+    get_opset_number_from_onnx,
+    get_latest_tested_opset_version
+)
+from ..proto.onnx_helper_modified import (
+    make_graph, make_model, make_tensor_value_info
+)
+from . import _registration
+from . import utils
+from .exceptions import MissingShapeCalculator, MissingConverter
+from ._container import ModelComponentContainer, _build_options
+from .onnx_optimisation_identity import onnx_remove_node_identity
+
+type_fct = type
+
+
+def _default_OPSET_TO_IR_VERSION():
+    return {
+        1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3,
+        7: 3, 8: 4, 9: 4, 10: 5, 11: 6, 12: 7,
+        13: 7, 14: 7, 15: 8, 16: 8, 17: 8, 18: 8
+    }
+
+
+try:
+    from onnxconverter_common.topology import OPSET_TO_IR_VERSION
+    assert OPSET_TO_IR_VERSION[18] is not None
+except (ImportError, KeyError):
+    OPSET_TO_IR_VERSION = _default_OPSET_TO_IR_VERSION()
+
+OPSET_ML_TO_OPSET = {1: 11, 2: 15, 3: 18}
+
+logger = getLogger('skl2onnx')
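Since the module emits its graph-construction traces on the 'skl2onnx' logger, a short sketch of how to surface the [Var]/[Op]/[Scope] debug messages with the standard logging module:

    import logging

    # enable the debug traces emitted by skl2onnx.common._topology
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('skl2onnx').setLevel(logging.DEBUG)
    # any subsequent call to convert_sklearn / to_onnx then prints the traces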
+
+
+def get_default_opset_for_domain(domain):
+    """
+    Returns the default opset associated with a domain, given the main opset.
+    """
+    from .. import __max_supported_opset__ as main_opset
+    if domain == '':
+        return main_opset
+    if domain == 'ai.onnx.ml':
+        if main_opset >= 16:
+            return 3
+        if main_opset < 6:
+            return 1
+        return 2
+    if domain == 'ai.onnx.training':
+        return 1
+    return None
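A minimal usage sketch (assuming skl2onnx is installed); the returned values depend on __max_supported_opset__, so the comments below are indicative only:

    from skl2onnx.common._topology import get_default_opset_for_domain

    print(get_default_opset_for_domain(''))            # main opset, e.g. 18
    print(get_default_opset_for_domain('ai.onnx.ml'))  # 3 once the main opset is >= 16
    print(get_default_opset_for_domain('unknown'))     # None for unregistered domains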
+
+
+
[docs]class Variable: + """ + Defines a variable which holds any data defined + from *ONNX* types. + """ + _UNIQUE_NUMBER_ = 0 + + def __init__(self, raw_name, onnx_name, scope, type=None): + """ + :param raw_name: A string indicating the variable's name in the + original model. Usually, it's the seed string + used to created its ONNX name (i.e., the + field *onnx_name* below). + :param onnx_name: A string indicating the variable's name in + the converted model + :param scope: A string. It's the name of the scope where this + variable is declared + :param type: A type object defined in .common.data_types.py; + e.g., FloatTensorType + """ + if not isinstance(raw_name, str): + raise TypeError( + "raw_name must be a string not '%s'." % raw_name.__class__) + if type is not None and not hasattr(type, 'shape'): + raise TypeError( + "Unexpected type for variable raw_name=%r, type=%r." % ( + raw_name, type)) + if not isinstance(onnx_name, str) or '(' in onnx_name: + if onnx_name.startswith('u(') and onnx_name[-1] == ')': + onnx_name0 = onnx_name + if scope is None: + onnx_name = "UU%03dUU" % Variable._UNIQUE_NUMBER_ + Variable._UNIQUE_NUMBER_ += 1 + else: + onnx_name = scope.get_unique_variable_name("U") + logger.debug( + '[Var] rename raw_name=%r, onnx_name=%r into %r', + raw_name, onnx_name0, onnx_name) + else: + raise TypeError( + "onnx_name must be a string not %r." % onnx_name) + + if type is not None: + shape = type.shape + if shape is not None: + not_none = [v for v in shape if v is not None] + if len(not_none) and min(not_none) == 0: + raise RuntimeError( + "A variable cannot be empty, raw_name=%r, " + "onnx_name=%r, shape=%r, type=%r." % ( + raw_name, onnx_name, shape, type)) + + self._raw_name = raw_name + self._onnx_name = onnx_name + self._scope = scope + self._type = type + self._parent = None + + # The following fields are bool variables used in parsing and + # compiling stages + self._is_fed = None + self._is_root = None + self._is_leaf = None + if self.type is not None and not isinstance(self.type, DataType): + raise TypeError( + "shape must be a DataType not {}.".format(self.type)) + if isinstance(self.type, TensorType): + shape = self.type.shape + if not isinstance(shape, (list, tuple)): + try: + shape = list(shape) + except TypeError: + raise TypeError("shape must be a tuple or a list not " + "{}.".format(type_fct(shape))) + for dim in shape: + if dim is None: + continue + if not isinstance(dim, (int, np.int32, np.int64, np.intc)): + raise TypeError( + "shape must contains integers not %r (type=%r)." + "" % (dim, dim.__class__)) + logger.debug('[Var] +%s', self) + + # links to operators using those variables + self.operators_outputs_ = [] + self.operators_inputs_ = [] + self._check() + + def _check(self): + if self.type is not None and self.type.shape is not None: + for k in self.type.shape: + if k is None: + continue + if not isinstance(k, (int, np.integer)): + raise ValueError( + "Unexpected type %r for shape %r." 
+ "" % (type(k), self)) + + @property + def raw_name(self): + return self._raw_name + + @property + def onnx_name(self): + return self._onnx_name + + @property + def scope(self): + return self._scope + + @property + def type(self): + return self._type + + @property + def is_fed(self): + return self._is_fed + + @property + def is_root(self): + return self._is_root + + @property + def is_leaf(self): + return self._is_leaf + + def init_status(self, is_fed=None, is_root=None, is_leaf=None): + if is_fed is not None and is_fed != self.is_fed: + logger.debug( + '[Var] update is_fed=%r for %r, parent=%r', + is_fed, self, self._parent) + self._is_fed = is_fed + if is_root is not None and is_root != self.is_root: + logger.debug('[Var] update is_root=%r for %r', is_root, self) + self._is_root = is_root + if is_leaf is not None and is_leaf != self.is_leaf: + logger.debug('[Var] update is_leaf=%r for %r', is_leaf, self) + self._is_leaf = is_leaf + + def __setattr__(self, name, value): + if name == "type": + self.set_type(value) + elif name == "onnx_name": + raise AttributeError("You must use method set_onnx_name.") + elif name in {"is_fed", "is_root", "is_leaf"}: + raise AttributeError("You must use method init_status.") + elif name in {'scope', 'raw_name'}: + raise AttributeError("scope or raw_name cannot be changed.") + self.__dict__[name] = value + + def set_type(self, new_type): + if (new_type is None or isinstance(new_type, (str, Variable)) or + not hasattr(new_type, 'shape')): + raise TypeError( + "Unexpected new type for variable %r, new_type=%r." % ( + self, new_type)) + logger.debug('[Var] update type for %r', self) + self._type = new_type + self._check() + + def set_onnx_name(self, onnx_name): + if onnx_name != self._onnx_name: + logger.debug( + '[Var] update onnx_name, from %r to %r in %r', + self.onnx_name, onnx_name, self) + if self.scope is not None and not isinstance(self.scope, str): + self.scope.rename_onnx_name(self._onnx_name, onnx_name) + self._onnx_name = onnx_name + + def set_parent(self, operator): + if self._parent is not None: + raise RuntimeError( + "This variable is already the output of operator %r. " + "It cannot be the output of %r." % (self._parent, operator)) + logger.debug( + '[Var] set parent for %r, parent=%r', self, operator) + self._parent = operator + + def get_first_dimension(self): + """ + Returns the first dimension (batch dimension) or + None if not specified (shape is empty). + """ + if (self.type is None or self.type.shape is None or + len(self.type.shape) == 0): + return None + return self.type.shape[0] + + def get_second_dimension(self): + if (self.type is None or self.type.shape is None or + len(self.type.shape) < 2): + return None + return self.type.shape[1] + + @property + def full_name(self): + """ + Return a globally unique variable ID + """ + return self.onnx_name + + def __repr__(self): + return ("Variable('{0}', '{1}', type={2})".format( + self.raw_name, self.onnx_name, self.type)) + + @staticmethod + def from_pb(obj): + """ + Creates a data type from a protobuf object. 
+ """ + def get_dim(d): + r = d.dim_value + if "dim_param" in str(d): + return None + if r == 0: + # dim_value is 0 when it is 0 or undefined + return 0 if "0" in str(d) else None + return r + + def get_shape(tt): + return [get_dim(tt.shape.dim[i]) + for i in range(len(tt.shape.dim))] + + if hasattr(obj, 'extend'): + return [Variable.from_pb(o) for o in obj] + + name = obj.name + if obj.type.tensor_type: + tt = obj.type.tensor_type + elem = tt.elem_type + shape = get_shape(tt) + if elem == onnx_proto.TensorProto.FLOAT: + ty = FloatTensorType(shape) + elif elem == onnx_proto.TensorProto.BOOL: + ty = BooleanTensorType(shape) + elif elem == onnx_proto.TensorProto.DOUBLE: + ty = DoubleTensorType(shape) + elif elem == onnx_proto.TensorProto.STRING: + ty = StringTensorType(shape) + elif elem == onnx_proto.TensorProto.INT64: + ty = Int64TensorType(shape) + elif elem == onnx_proto.TensorProto.INT32: + ty = Int32TensorType(shape) + elif (UInt8TensorType is not None and + elem == onnx_proto.TensorProto.UINT8): + ty = UInt8TensorType(shape) + elif (Int8TensorType is not None and + elem == onnx_proto.TensorProto.INT8): + ty = Int8TensorType(shape) + elif elem == 0: + ty = FloatTensorType(shape) + else: + raise NotImplementedError( + "Unsupported type '{}' (elem_type={}).".format( + type(obj.type.tensor_type), elem)) + else: + raise NotImplementedError("Unsupported type '{}' as " + "a string ({}).".format( + type(obj), obj)) + + return Variable(name, name, None, ty) + + def __iter__(self): + "Enables expression such as `a,b = self`." + yield self.onnx_name + yield self.type + + def __getitem__(self, index): + if index == 0: + return self.onnx_name + if index == 1: + return self.type + raise IndexError("Unreachable element at index %d." % index) + + def add_operator(self, op, in_or_out): + "Add a link to an operator, True for output, False for input." + if in_or_out: + self.operators_outputs_.append(op) + else: + self.operators_inputs_.append(op) + + def check_compatible_type(self, other_type): + + def empty_shape(shape): + return shape is None or len(shape) == 0 + + if self.type is None: + if other_type is None: + return + elif other_type is not None: + if isinstance(self.type, type(other_type)): + if self.type.shape == other_type.shape: + return + if empty_shape(other_type.shape): + return + raise TypeError( + "Incompatible type for variable %r and type %r." % ( + self, other_type))
+ + +class VariableStr(Variable): + """ + Defines a variable a string. This should be avoided. + """ + + def __init__(self, name, scope=None, type=None): + Variable.__init__(self, name, name, scope=scope, type=type) + + @property + def raw_name(self): + return self._raw_name + + @property + def onnx_name(self): + if self._onnx_name.startswith("u("): + raise RuntimeError( + "Variable should be renamed as onnx_name=%r." + "" % self._onnx_name) + return self._onnx_name + + +
[docs]class Operator: + """ + Defines an operator available in *ONNX*. + """ + class OperatorList(list): + def __init__(self, parent, kind): + super(Operator.OperatorList, self).__init__() + self.parent = parent + self.kind = kind + + def __eq__(self, second): + raise NotImplementedError( + "Operator equal not implemented and not needed.") + + def append(self, v): + if not isinstance(v, Variable): + raise TypeError( + "Input and output must be of type Variable not %r." + "" % type(v)) + if self.kind == 'Out': + v.set_parent(self.parent) + super(Operator.OperatorList, self).append(v) + logger.debug("[Op] add %s %r to %r", self.kind, v, self.parent) + if self.kind == 'In': + v.add_operator(self.parent, False) + elif self.kind == "Out": + v.add_operator(self.parent, True) + else: + raise RuntimeError( + "Unexpected value for kind=%r." % self.kind) + + def extend(self, vs): + for v in vs: + self.append(v) + + def __getitem__(self, i): + v = list.__getitem__(self, i) + if isinstance(i, int) and not isinstance(v, Variable): + raise TypeError("Element %d must be a Variable not %r." % ( + i, type(v))) + return v + + def __setitem__(self, i, v): + raise LookupError( + "Setter should not be used to modify an element.") + + def set_element(self, i, v): + "Updates element i." + if not isinstance(v, Variable): + raise TypeError( + "Value v must be a Variable not %r." % type(v)) + logger.debug( + "[Op] %s-change element %d from %r to %r in %r", + self.kind, i, self[i], v, self.parent) + list.__setitem__(self, i, v) + + def to_string(self): + names = [] + for o in self: + if hasattr(o, 'onnx_name'): + names.append(o.onnx_name) + else: + names.append('"%s"' % str(o)) + return ",".join(names) + + def __init__(self, onnx_name, scope, type, raw_operator, + target_opset, scope_inst): + """ + :param onnx_name: A unique ID, which is a string + :param scope: The name of the scope where this operator is + declared. It's a string. + :param type: A object which uniquely characterizes the type of + this operator. For example, it can be a string, + pooling, if this operator is associated with a + CoreML pooling layer. + :param raw_operator: The original operator which defines this operator; + for example, a scikit-learn Imputer and + a CoreML Normalizer. + :param target_opset: The target opset number for the converted model. + :param scope_inst: :class:`Scope` instance the operator belongs to + """ + if isinstance(raw_operator, str): + raise RuntimeError("Parameter raw_operator must be an object not " + "a string '{0}'.".format(raw_operator)) + # operator name in the converted model, if raw_operator + # is not None, output_shapes can be guessed + # from the raw model. Otherwise, it can be guessed + # from the input shapes. + self.onnx_name = onnx_name + self.scope = scope + self.type = type + self.raw_operator = raw_operator + self.inputs = Operator.OperatorList(self, 'In') + self.outputs = Operator.OperatorList(self, 'Out') + self._is_evaluated = None + self.target_opset = target_opset + self.scope_inst = scope_inst + logger.debug('[Op] +%r', self) + + def new_raw_operator(self, raw_operator, alias): + """ + Returns a shallow copy of this operator, + changes the raw_operator but keeps the same inputs + and outputs. 
+ """ + op = Operator(self.onnx_name, self.scope, alias, raw_operator, + self.target_opset, self.scope_inst) + op.inputs = self.inputs + op.outputs = self.outputs + return op + + def __repr__(self): + try: + textop = repr(self.raw_operator) + except AttributeError: + textop = "MISSING OP" + except KeyError: + # The line above fails for python 3.7 + textop = type(self.raw_operator) + if isinstance(textop, str) and "\n" in textop: + textop = textop.replace('\n', '').replace(' ', '') + return ("Operator(type='{0}', onnx_name='{1}', inputs='{2}', " + "outputs='{3}', raw_operator={4})".format( + self.type, self.onnx_name, + self.inputs.to_string(), + self.outputs.to_string(), + textop)) + + def __setattr__(self, name, value): + if name in ('inputs', 'outputs'): + if (isinstance(value, list) and + not isinstance(value, Operator.OperatorList)): + if name == 'inputs': + self.inputs = Operator.OperatorList(self, 'In') + self.inputs.extend(value) + return + if name == 'outputs': + self.outputs = Operator.OperatorList(self, 'Out') + self.outputs.extend(value) + return + if not isinstance(value, Operator.OperatorList): + raise TypeError( + "inputs or outputs must be of type Operator.OperatorList.") + ioo = name == 'outputs' + for v in value: + v.add_operator(self, ioo) + self.__dict__[name] = value + + @property + def is_evaluated(self): + return self._is_evaluated + + def init_status(self, is_evaluated=None): + if is_evaluated is not None and is_evaluated != self.is_evaluated: + logger.debug( + '[Op] update is_evaluated=%r for %r', + is_evaluated, self) + self._is_evaluated = is_evaluated + + @property + def full_name(self): + """ + Return a globally unique operator ID + """ + return self.onnx_name + + @property + def input_full_names(self): + """ + Return all input variables' names + """ + return [variable.full_name for variable in self.inputs] + + @property + def output_full_names(self): + """ + Return all output variables' names + """ + return [variable.full_name for variable in self.outputs] + + @property + def original_operator(self): + """ + Return the original operator/layer + """ + return self.raw_operator + + def infer_types(self): + # Invoke a core inference function + if self.type is None: + raise MissingShapeCalculator( + "Unable to find a shape calculator for type '{}'.".format( + type(self.raw_operator))) + try: + shape_calc = _registration.get_shape_calculator(self.type) + except ValueError: + raise MissingShapeCalculator( + "Unable to find a shape calculator for alias '{}' " + "and type '{}'.".format(self.type, type(self.raw_operator))) + if shape_calc is None: + raise MissingShapeCalculator( + "Unexpected shape calculator for alias '{}' " + "and type '{}'.".format(self.type, type(self.raw_operator))) + logger.debug( + "[Shape-a] %r fed %r - %r", self, + "".join(str(i.is_fed) for i in self.inputs), + "".join(str(i.is_fed) for i in self.outputs)) + shape_calc(self) + logger.debug( + "[Shape-b] %r inputs=%r - outputs=%r", + self, self.inputs, self.outputs)
+ + +
[docs]class Scope: + """ + Every node of an *ONNX* graph must be unique. This class holds the list + of existing name for every node already defined in graph. It also + provides functions to create a unique unused name. + """ + + def __init__(self, name, target_opset=None, + custom_shape_calculators=None, options=None, + registered_models=None, naming=None): + """ + :param name: A string, the unique ID of this scope in a + Topology object + :param target_opset: The target opset number for the converted + model. + :param custom_conversion_functions: a dictionary for specifying + the user customized conversion function + :param custom_shape_calculators: a dictionary for specifying + the user customized shape calculator + :param options: see :ref:`l-conv-options` + :param naming: the user may want to change the way intermediate + are named, this parameter can be a string (a prefix) or a + function, which signature is the following: + `get_name(name, existing_names)`, the library will then + check this name is unique and modify it if not + :param registered_models: registered models + + .. versionchanged:: 1.10.0 + Parameter *naming* was added. + """ + self.name = name + self.onnx_variable_names = set() + self.onnx_operator_names = set() + self.target_opset = target_opset + self.custom_shape_calculators = custom_shape_calculators + + # An one-to-many map from raw variable name to ONNX variable + # names. It looks like + # (key, value) = (raw_name, [onnx_name, onnx_name1, onnx_name2, ..., onnx_nameN]) # noqa + # The last name may hide all other names in this scope. + self.variable_name_mapping = {} + + # A map of local variables defined in this scope. + # (key, value) = (onnx_name, variable) + self.variables = OrderedDict() + self.input_variables = [] + self.output_variables = [] + + # A map of local operators defined in this scope. + # (key, value) = (onnx_name, operator) + self.operators = {} + + # Additional options given to converters. + self.options = options + + # Registered models + self.registered_models = registered_models + self.naming = naming + + if naming is None: + self._naming = Topology._generate_unique_name + elif isinstance(naming, str): + self._naming = ( + lambda seed, names: Topology._generate_unique_name( + self.naming + seed, names)) + elif callable(self.naming): + self._naming = ( + lambda seed, names: Topology._generate_unique_name( + self.naming(seed, names), names)) + else: + raise TypeError( + "Unexpected type for parameter naming: %r." % type(naming)) + + def get(self, var_name, default_value): + "Returns variable with 'name' or default value is not found." + return self.variables.get(var_name, default_value) + + def has_variable_name(self, name): + """ + Tells if a variable is already registered. + """ + return name in self.onnx_variable_names + + def get_shape_calculator(self, model_type): + """ + Returns the shape calculator for the given model type. + + :param model_type: model type such as *LogisticRegression* + :return: alias or None if not found + """ + return self.custom_shape_calculators.get(model_type, None) + +
[docs] def get_unique_variable_name(self, seed, rename=True): + """ + Creates a unique variable ID based on the given seed. + """ + if not isinstance(seed, str): + raise TypeError("Parameter seed must be a string not {}." + "".format(type(seed))) + if rename: + name = self._naming(seed, self.onnx_variable_names) + else: + name = Topology._generate_unique_name( + seed, self.onnx_variable_names) + return name
+ +
[docs] def get_unique_operator_name(self, seed): + """ + Creates a unique operator ID based on the given seed. + """ + return self._naming(seed, self.onnx_operator_names)
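A short sketch of the naming behaviour described above (assuming skl2onnx is installed): the first request returns the sanitized seed, later requests receive a numeric suffix, and a string passed as naming acts as a prefix:

    from skl2onnx.common._topology import Scope

    scope = Scope('example_scope')
    print(scope.get_unique_variable_name('X'))         # 'X'
    print(scope.get_unique_variable_name('X'))         # 'X1'
    print(scope.get_unique_variable_name('my input'))  # 'my_input'

    prefixed = Scope('prefixed_scope', naming='DBG_')
    print(prefixed.get_unique_variable_name('X'))      # 'DBG_X'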
+ + def declare_local_variable(self, raw_name, type=None, prepend=False, + missing_type=False, rename=True): + """ + This function may create a new variable in this scope. If + *raw_name* has been used to create other variables, the new + variable will hide all other variables created using *raw_name*. + """ + if type is None and not missing_type: + raise RuntimeError( + "Unknown type for %r (type=%r)." % (raw_name, type)) + # Get unique ID for the new variable + onnx_name = self.get_unique_variable_name(raw_name, rename=rename) + + # Create the variable + variable = Variable(raw_name, onnx_name, self.name, type) + self.register_variable(variable, prepend=prepend) + return variable + + def register_variable(self, var, prepend=False): + "Adds a variable to the scope." + if var.onnx_name in self.variables: + raise RuntimeError( + "Variable %r already registered (other=%r)." % ( + var, self.variables[var.onnx_name])) + + if var.raw_name in self.variable_name_mapping: + # Hide existing variables with the same raw_name + if not prepend: + self.variable_name_mapping[var.raw_name].append(var.onnx_name) + else: + self.variable_name_mapping[var.raw_name].insert( + 0, var.onnx_name) + else: + self.variable_name_mapping[var.raw_name] = [var.onnx_name] + + self.variables[var.onnx_name] = var + + def declare_existing_subgraph_name(self, graph_proto): + """ + Declare all name from a subgraph in order to avoid being picked twice. + """ + output_name = {o.name for o in graph_proto.output} + for node in graph_proto.node: + for name in node.output: + if name in output_name: + continue + if self.has_variable_name(name): + raise NameError( + "Result name %r is already taken (outputs=%r) " + "(node=%r)." % ( + name, output_name, node)) + self.onnx_variable_names.add(name) + if node.name in self.onnx_operator_names: + raise NameError( + "Operator name %r is already taken " + "(node=%r)." % ( + node.name, node)) + self.onnx_operator_names.add(node.name) + + def rename_onnx_name(self, old_name, new_name): + if new_name in self.variables: + raise RuntimeError( + "Name %r already in variables (%r)." % ( + new_name, self.variables[new_name])) + if old_name not in self.variables: + raise RuntimeError( + "Unable to find name %r in variables." % old_name) + logger.debug( + '[Scope] update onnx_name, from %r to %r', + old_name, new_name) + self.variables[new_name] = self.variables[old_name] + del self.variables[old_name] + + def declare_local_input(self, raw_name, type=None, prepend=False, + rename=True): + """ + Calls `declare_local_variable`. Registers this variable + as an input. + """ + var = self.declare_local_variable( + raw_name, type=type, prepend=prepend, rename=rename) + self.input_variables.append(var) + return var + + def declare_local_output(self, raw_name, type=None, prepend=False, + missing_type=False): + """ + Calls `declare_local_variable`. Registers this variable + as an output. + """ + var = self.declare_local_variable( + raw_name, type=type, prepend=prepend, + missing_type=missing_type) + self.output_variables.append(var) + return var + + def declare_local_operator(self, type, raw_model=None): + """ + This function is used to declare new local operator. 
+ """ + onnx_name = self.get_unique_operator_name(str(type)) + operator = Operator(onnx_name, self.name, type, raw_model, + self.target_opset, scope_inst=self) + self.operators[onnx_name] = operator + return operator + + def _get_allowed_options(self, model, fail=True): + if self.registered_models is not None: + if type(model) not in self.registered_models['aliases']: + if fail: + raise NotImplementedError( + "No registered models, no known allowed options " + "for model '{}'.".format(model.__class__.__name__)) + return {} + alias = self.registered_models['aliases'][type(model)] + conv = self.registered_models['conv'][alias] + allowed = conv.get_allowed_options() + return allowed + raise NotImplementedError( + "No registered models, no known allowed options " + "for model '{}'.".format(model.__class__.__name__)) + + def add_options(self, model_id, options): + """ + Adds an option, for example, + ``add_options(id(clr), {'raw_scores': True})`` + tells the converter associated to ``clr`` to + use raw score instead of probabilities. + + :param model_id: class or ``id(instance)`` + :param options: dictionary with the new values + """ + if options is None: + return + if self.options is None: + self.options = {} + if model_id not in self.options: + self.options[model_id] = None + if self.options[model_id] is None: + self.options[model_id] = {} + self.options[model_id].update(options) + + def get_options(self, model, default_values=None, fail=True): + """ + Returns additional options for a model. + It first looks by class then by id (``id(model)``). + :param model: model being converted + :param default_values: default options (it is modified by + the function) + :param fail: fails if option it not found + :return: dictionary + """ + return _build_options( + model, self.options, default_values, + self._get_allowed_options(model, fail=fail), + fail=fail) + + def replace_raw_operator(self, op1, op2, alias): + """ + Replaces every raw operator op1 by op2. + The function uses `id()` to detect op1. + """ + for v in self.operators.values(): + if id(v.raw_operator) == id(op1): + logger.debug( + '[Scope] replace %d by %d in %r.', + id(v.raw_operator), id(op1), v) + v.raw_operator = op2 + v.type = alias
+ + +
[docs]class Topology: + """ + Holds instances on :class:`Scope <skl2onnx.common._topology.Scope>` and + :class:`SklearnModelContainer + <skl2onnx.common._container.SklearnModelContainer>`. + These are filled by the converters while a pipeline is being converted. + """ + + def __init__(self, model, default_batch_size=1, initial_types=None, + target_opset=None, custom_conversion_functions=None, + custom_shape_calculators=None, registered_models=None): + """ + Initializes a *Topology* object, which is an intermediate + representation of a computational graph. + + :param model: RawModelContainer object or one of its derived + classes. It contains the original model. + :param default_batch_size: batch_size prepend to scalar and + array types from CoreML. It's usually + 1 or None. + :param initial_types: A list providing some types for some + root variables. + Each element is a tuple of a variable name and a type defined + in *data_types.py*. + :param custom_conversion_functions: a dictionary for specifying + the user customized conversion function + :param custom_shape_calculators: a dictionary for specifying the + user customized shape calculator + :param registered_models: registered models + """ + self.scopes = [] + self.raw_model = model + self.scope_names = set() + self.initial_types = initial_types if initial_types else list() + self.default_batch_size = default_batch_size + self.target_opset = target_opset + self.custom_conversion_functions = ( + custom_conversion_functions if custom_conversion_functions else {}) + self.custom_shape_calculators = ( + custom_shape_calculators if custom_shape_calculators else {}) + + for k in self.custom_conversion_functions: + if not callable(k): + raise TypeError("Keys in custom_conversion_functions must be " + "types not strings.") + for k in self.custom_shape_calculators: + if not callable(k): + raise TypeError("Keys in custom_shape_calculators must be " + "types not strings.") + + # A map of local overwritten model aliases. + self.model_aliases = {} + all_model_types = (set(self.custom_conversion_functions) + | set(self.custom_shape_calculators)) + for mtype in all_model_types: + alias = "{}_{}".format(mtype.__name__, id(self)) + self.model_aliases[mtype] = alias + + # Registered models + if registered_models is None: + raise AssertionError() + self.registered_models = registered_models + + @property + def scope(self): + if len(self.scopes) != 1: + raise RuntimeError( + "Only one scope is allowed not %d." % len(self.scopes)) + return self.scopes[0] + + @staticmethod + def _generate_unique_name(seed, existing_names): + """ + Produce an unique string based on the seed + :param seed: a string + :param existing_names: a set containing strings which cannot be + produced + :return: a string similar to the seed + """ + if seed == '': + raise ValueError('Name seed must be a non-empty string.') + + # Make the seed meet C-style naming convention + # Only alphabets and numbers are allowed + seed = re.sub('[^\\w+]', '_', seed) + # The first symbol cannot be a number + if re.match('^[0-9]', seed): + seed = '_' + seed + + # If seed has never been seen, we return it as it is. Otherwise, + # we will append an number to make it unique. 
+ if seed not in existing_names: + existing_names.add(seed) + return seed + else: + i = 1 + while seed + str(i) in existing_names: + i += 1 + new_name = seed + str(i) + existing_names.add(new_name) + return new_name + + def get_unique_scope_name(self, seed): + return Topology._generate_unique_name(seed, self.scope_names) + + def declare_scope(self, seed, parent_scopes=None, options=None, + naming=None): + """ + Creates a new :class:`Scope <skl2onnx.common._topology.Scope>` + and appends it to the list of existing scopes. + """ + if len(self.scopes) != 0: + raise RuntimeError( + "Only one scope can be created.") + scope = Scope( + self.get_unique_scope_name(seed), target_opset=self.target_opset, + custom_shape_calculators=self.custom_shape_calculators, + options=options, registered_models=self.registered_models, + naming=naming) + + # Declare input variables. + # They should be the inputs of the scikit-learn + # model you want to convert into ONNX. + for var_name, initial_type in self.initial_types: + scope.declare_local_input(var_name, initial_type, rename=False) + self.scopes.append(scope) + return scope + + def unordered_operator_iterator(self): + for scope in self.scopes: + for operator in scope.operators.values(): + yield operator + + def unordered_variable_iterator(self): + for scope in self.scopes: + for variable in scope.variables.values(): + yield variable + + def call_converter(self, operator, container, verbose=0): + "Calls converter for operator *operator*." + mtype = type(operator.raw_operator) + if mtype in self.custom_conversion_functions: + conv = self.custom_conversion_functions[mtype] + elif operator.type in self.custom_conversion_functions: + conv = self.custom_conversion_functions[operator.type] + elif hasattr(operator.raw_operator, "onnx_converter"): + conv = operator.raw_operator.onnx_converter() + else: + # Convert the selected operator into some ONNX objects and + # save them into the container + try: + conv = _registration.get_converter(operator.type) + except ValueError: + raise MissingConverter( + "Unable to find converter for alias '{}' type " + "'{}'. You may raise an issue at " + "https://github.com/onnx/sklearn-onnx/issues." + "".format(operator.type, + type(getattr(operator, 'raw_model', None)))) + + container.validate_options(operator) + if verbose > 0: + print("[call_converter] call converter for %r." % operator.type) + logger.debug( + "[Conv] call %r fed %r - %r", operator, + "".join(str(i.is_fed) for i in operator.inputs), + "".join(str(i.is_fed) for i in operator.outputs)) + conv(self.scopes[0], operator, container) + logger.debug("[Conv] end - %r", operator) + + def call_shape_calculator(self, operator): + "Calls shape_calculator for operator *operator*." + mtype = type(operator.raw_operator) + if mtype in self.custom_shape_calculators: + # overwritten operator. 
+ source = 'custom' + shape_calc = self.custom_shape_calculators[mtype] + elif operator.type in self.custom_shape_calculators: + source = 'custom' + shape_calc = self.custom_shape_calculators[operator.type] + elif hasattr(operator.raw_operator, "onnx_shape_calculator"): + source = 'onnx_shape_calculator' + shape_calc = operator.raw_operator.onnx_shape_calculator() + else: + source = "" + shape_calc = None + + if shape_calc is not None: + logger.debug( + "[Shape1] %r fed %r - %r (source=%r)", operator, + ",".join(str(i.is_fed) for i in operator.inputs), + ",".join(str(i.is_fed) for i in operator.outputs), + source) + shape_calc(operator) + else: + logger.debug('[Shape2] call infer_types for %r', operator) + operator.infer_types() + + def _initialize_graph_status_for_traversing(self): + """ + Initialize the status of all variables and operators before + traversing the graph. Only used by convert_operators. + """ + if len(self.scopes) != 1: + raise RuntimeError( + "Only one scope is allowed not %d." % len(self.scopes)) + input_names = set(v.onnx_name for v in self.scopes[0].input_variables) + if len(input_names) == 0: + raise RuntimeError("No detected inputs.") + for variable in self.unordered_variable_iterator(): + is_input = variable.onnx_name in input_names + variable.init_status(is_fed=is_input) + + for operator in self.unordered_operator_iterator(): + operator.init_status(is_evaluated=False) + + def _propagate_status(self, operator, container, fed_variables, + verbose=0): + """ + Propagates status *is_fed* based on output variable + and node added in the container. + """ + if verbose > 1: + print("[_propagate_status] after op=%r" % operator) + vars = {} + for node in container.nodes: + for i in node.input: + if i not in vars: + vars[i] = [] + vars[i].append(node) + + if verbose > 1: + print("[_propagate_status] newly fed=%r" % list( + v.onnx_name for v in operator.outputs if v.is_fed)) + stack = list(fed_variables) + scope = self.scopes[0] + while len(stack) > 0: + nodes = {} + for name in stack: + if name not in vars: + continue + for n in vars[name]: + nodes[id(n)] = n + stack = [] + for node in nodes.values(): + if all(fed_variables.get(n, False) for n in node.input): + for o in node.output: + if o not in fed_variables: + if verbose > 1: + print("[_propagate_status] add=%r" % o) + fed_variables[o] = o + stack.append(o) + if o in scope.variables: + var = scope.variables[o] + var.init_status(is_fed=True) + if verbose > 1: + print("[_propagate_status] fed=%r" % var) + + def convert_operators(self, container=None, verbose=0): + """ + Calls all converters and shape_calculator for existing + operators. It also processes new operators created by + converters. + """ + def _check_operator_(operator): + if not isinstance(operator.inputs, Operator.OperatorList): + raise TypeError( + "operator.inputs must be a Operator.OperatorList " + "not %r." % type(operator.inputs)) + if not isinstance(operator.outputs, Operator.OperatorList): + raise TypeError( + "operator.outputs must be a Operator.OperatorList " + "not %r." % type(operator.outputs)) + if any(not isinstance(i, Variable) for i in operator.inputs): + raise TypeError( + "One input is not a Variable for operator %r - %r." + "" % (type(operator.raw_operator), operator)) + if any(not isinstance(i, Variable) for i in operator.outputs): + raise TypeError( + "One output is not a Variable for operator %r - %r." 
+ "" % (type(operator.raw_operator), operator)) + + def _check_variable_in_(variable, operator): + idop = id(operator) + ids = set(id(op) for op in variable.operators_inputs_) + if idop not in ids: + raise RuntimeError( + "Operator %r not registered in the list of operators " + "of %r taking it as an input [\n%s]." % ( + operator, variable, + "\n".join(map(str, variable.operators_inputs_)))) + + def _check_variable_out_(variable, operator): + if variable.is_fed: + add = ["", "--DEBUG-INFO--"] + for scope in self.scopes: + add.append('---') + add.append(pprint.pformat( + scope.variable_name_mapping)) + add.append('---') + for var in scope.variables.values(): + add.append(" is_fed=%s %s - n_in=%d n_out=%d" % ( + getattr(var, 'is_fed', '?'), var, + len(var.operators_inputs_), + len(var.operators_outputs_))) + add.append('---') + for op in scope.operators.values(): + add.append(" is_evaluated=%s %s" % ( + getattr(op, 'is_evaluated', '?'), op)) + add.append('---') + for v in operator.inputs: + add.append(" inputs={}".format(v)) + for v in operator.outputs: + add.append(" outputs={}".format(v)) + add.append('--- operator producing this variable--') + for op in variable.operators_outputs_: + add.append(str(op)) + raise RuntimeError( + "A variable is already assigned ({}) " + "for operator '{}' (name='{}'). " + "operator.is_evaluated={}, inputs.is_fed={}, " + "outputs.is_fed={}. " + "This may still happen if a converter is a " + "combination of sub-estimators and one " + "of them is producing this output. " + "In that case, an identity node must be " + "added.{}".format( + variable, operator.type, + operator.onnx_name, operator.is_evaluated, + [v.is_fed for v in operator.inputs], + [v.is_fed for v in operator.outputs], + "\n".join(add))) + + if verbose > 0: + print("[convert_operators] begin") + self._initialize_graph_status_for_traversing() + fed_variables = {i.name: i for i in container.initializers} + changes = 1 + n_iter = 0 + while changes > 0: + n_iter += 1 + changes = 0 + ops = list(self.unordered_operator_iterator()) + if verbose > 0: + print("[convert_operators] iteration %d - n_vars=%d " + "n_ops=%d" % ( + n_iter, len(fed_variables), len(ops))) + for operator in ops: + _check_operator_(operator) + for var in operator.inputs: + if var.is_fed: + fed_variables[var.onnx_name] = var + if (all(variable.is_fed for variable in operator.inputs) and + not operator.is_evaluated): + + for variable in operator.inputs: + _check_variable_in_(variable, operator) + for variable in operator.outputs: + _check_variable_out_(variable, operator) + + self.call_shape_calculator(operator) + self.call_converter(operator, container, verbose=verbose) + + # If an operator contains a sequence of operators, + # output variables are not necessarily known at this stage. 
+ operator.init_status(is_evaluated=True) + for variable in operator.outputs: + if all(op.is_evaluated + for op in variable.operators_outputs_): + variable.init_status(is_fed=True) + fed_variables[variable.onnx_name] = variable + fed_variables.update( + {i.name: i for i in container.initializers + if i.name not in fed_variables}) + self._propagate_status(operator, container, fed_variables, + verbose=verbose) + + # unfed some variables (it happens when a node + # shares an output with another node) + rem = [] + for n, var in fed_variables.items(): + if not hasattr(var, 'operators_outputs_'): + # initializer + continue + if any(not o.is_evaluated + for o in var.operators_outputs_): + rem.append(n) + for r in rem: + v = fed_variables[r] + v.init_status(is_fed=False) + del fed_variables[v.onnx_name] + changes += 1 + + if verbose > 0: + print("[convert_operators] end iter: %d - n_vars=%d" % ( + n_iter, len(fed_variables))) + if verbose > 0: + print("[convert_operators] end.") + + # Last verification. + not_evaluated = [] + for op in self.unordered_operator_iterator(): + if not op.is_evaluated: + not_evaluated.append(op) + if len(not_evaluated) > 0: + rows = ["---VARS---"] + for var in self.unordered_variable_iterator(): + rows.append( + "is_fed=%r is_leaf=%r is_root=%r - %r - n_in=%d n_out=%d" + "" % (var.is_fed, var.is_leaf, var.is_root, var, + len(var.operators_inputs_), + len(var.operators_outputs_))) + rows.append("---OPERATORS---") + for op in self.unordered_operator_iterator(): + rows.append("is_eval=%r - %r" % (op.is_evaluated, op)) + rows.append("---NODES---") + for node in container.nodes: + rows.append("%s: %r -> %r" % ( + node.op_type, node.input, node.output)) + raise RuntimeError( + "Not all operators have been evaluated. A variable name " + "is probably misspelled.\n%s" + "" % "\n".join(rows)) + + # Input and output + if len(self.scopes[0].input_variables) > 0: + inputs = self.scopes[0].input_variables + else: + inputs = [v for v in self.unordered_variable_iterator() + if v.is_root] + for i in inputs: + container.add_input(i) + outputs = [v for v in self.unordered_variable_iterator() + if v.is_leaf] + + # The function checks that for output variable, + # raw_name equal onnx_name. It swaps names if it is not the case. + to_swap = [] + for out in outputs: + if out.raw_name != out.onnx_name: + to_swap.append(out) + if len(to_swap) != 0: + swaped = set() + for var in to_swap: + if var.raw_name in swaped: + continue + swaped.add(var.raw_name) + if verbose > 1: + print("[convert_operators] %r <-> %r." % ( + var.raw_name, var.onnx_name)) + old_name = var.onnx_name + new_name = var.raw_name + + try: + container.swap_names(old_name, new_name) + except NotImplementedError as e: + logger.debug( + '[Topo] unable to swap %r and %r (%r).', + old_name, new_name, e) + continue + + for v in self.unordered_variable_iterator(): + if v.onnx_name == old_name: + v.set_onnx_name(new_name) + elif v.onnx_name == new_name: + v.set_onnx_name(old_name) + + for o in outputs: + container.add_output(o)
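The loop in ``convert_operators`` follows a fixed-point schedule: an operator is converted only once all of its inputs are fed, its outputs then become fed, and the scan restarts because converters may create new operators or un-feed outputs shared between nodes. A minimal sketch of that scheduling idea, using plain dictionaries instead of the library's ``Operator`` and ``Variable`` classes (illustrative only, not the actual implementation):

::

    # toy operators: evaluate each one once all of its inputs are available
    operators = [
        {"name": "scaler", "inputs": ["X"], "outputs": ["X_scaled"], "done": False},
        {"name": "classifier", "inputs": ["X_scaled"], "outputs": ["label"], "done": False},
    ]
    fed = {"X"}  # graph inputs (and initializers) start as fed

    changes = 1
    while changes:
        changes = 0
        for op in operators:
            if op["done"] or not all(i in fed for i in op["inputs"]):
                continue
            # the real code calls call_shape_calculator and call_converter here
            op["done"] = True
            fed.update(op["outputs"])
            changes += 1

    assert all(op["done"] for op in operators)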
+ + +
[docs]def convert_topology(topology, model_name, doc_string, target_opset, + channel_first_inputs=None, + options=None, remove_identity=True, + verbose=0): + """ + This function is used to convert our Topology object defined in + _parser.py into a ONNX model (type: ModelProto). + + :param topology: The Topology object we are going to convert + :param model_name: GraphProto's name. Let "model" denote the + returned model. The string "model_name" would be + assigned to "model.graph.name." + :param doc_string: A string attached to the produced model + :param target_opset: number or dictionary, + for example, 7 for ONNX 1.2, and 8 for ONNX 1.3, + a dictionary is used to indicate different opset for + different domains + :param options: see :ref:`l-conv-options` + :param remove_identity: removes identity nodes + include '1.1.2', '1.2', and so on. + :param verbose: displays information while converting + :return: a ONNX ModelProto + """ + if target_opset is None: + target_opset = get_latest_tested_opset_version() + if isinstance(target_opset, dict): + onnx_target_opset = target_opset.get( + '', get_latest_tested_opset_version()) + else: + onnx_target_opset = target_opset + if onnx_target_opset > get_opset_number_from_onnx(): + found = get_opset_number_from_onnx() + raise RuntimeError( + "Parameter target_opset {} > {} is higher than the " + "version of the installed onnx package. See " + "https://github.com/onnx/onnx/blob/master/docs/" + "Versioning.md#released-versions" + ".".format(onnx_target_opset, found)) + if onnx_target_opset > get_latest_tested_opset_version(): + warnings.warn( + "Parameter target_opset {} > {} is higher than the " + "the latest tested version" + ".".format( + onnx_target_opset, + get_latest_tested_opset_version())) + + container = ModelComponentContainer( + target_opset, options=options, + registered_models=topology.registered_models, + white_op=topology.raw_model._white_op, + black_op=topology.raw_model._black_op, + verbose=verbose) + + # Traverse the graph from roots to leaves + # This loop could eventually be parallelized. + topology.convert_operators(container=container, verbose=verbose) + container.ensure_topological_order() + + if len(container.inputs) == 0: + raise RuntimeError("No detected inputs after conversion.") + if len(container.outputs) == 0: + raise RuntimeError("No detected outputs after conversion.") + if verbose >= 2: + print("---NODES---") + for node in container.nodes: + print(" %s - %s: %r -> %r" % ( + node.op_type, node.name, node.input, node.output)) + + # Create a graph from its main components + if container.target_opset_onnx < 9: + # When calling ModelComponentContainer's add_initializer(...), + # nothing is added into the input list. However, for ONNX target + # opset < 9, initializers should also be a part of model's + # (GraphProto) inputs. Thus, we create ValueInfoProto objects + # from initializers (type: TensorProto) directly and then add + # them into model's input list. + extra_inputs = [] # ValueInfoProto list of the initializers + for tensor in container.initializers: + # Sometimes (especially when creating optional input values + # such as RNN's initial hidden state), an initializer is also + # one of the original model's input, so it has been added into + # the container's input list. If this is the case, we need to + # skip one iteration to avoid duplicated inputs. 
+ if tensor.name in [value_info.name for value_info in + container.inputs]: + continue + + # Initializers are always tensors so we can just call + # make_tensor_value_info(...). + value_info = make_tensor_value_info( + tensor.name, tensor.data_type, tensor.dims) + extra_inputs.append(value_info) + + # Before ONNX opset 9, initializers were needed to be passed in + # with inputs. + graph = make_graph(container.nodes, model_name, + container.inputs + extra_inputs, + container.outputs, container.initializers) + else: + # In ONNX opset 9 and above, initializers are included as + # operator inputs and therefore do not need to be passed as + # extra_inputs. + graph = make_graph( + container.nodes, model_name, container.inputs, + container.outputs, container.initializers) + + # Add extra information related to the graph + graph.value_info.extend(container.value_info) + + # Create model + onnx_model = make_model(graph) + + # Update domain version + opv = min(onnx_target_opset, + _get_main_opset_version(onnx_model) or onnx_target_opset) + if not _update_domain_version(container, onnx_model, verbose=verbose): + # Main opset was not added. Doing it here. + op_set = onnx_model.opset_import.add() + op_set.domain = '' + op_set.version = opv + if verbose > 0: + print('[convert_topology] +opset: name=%r, version=%s' % ( + '', opv)) + + # Add extra information + irv = OPSET_TO_IR_VERSION.get(opv, onnx_proto.IR_VERSION) + onnx_model.ir_version = irv + onnx_model.producer_name = utils.get_producer() + onnx_model.producer_version = utils.get_producer_version() + onnx_model.domain = utils.get_domain() + onnx_model.model_version = utils.get_model_version() + onnx_model.doc_string = doc_string + + # Removes many identity nodes, + # the converter may introduct identity nodes + # after a zipmap operator and onnx <= 1.7 does not + # support that. It does not use onnxconverter-common + # as the optimizer only support opset >= 9. + if remove_identity: + onnx_model = onnx_remove_node_identity(onnx_model) + + return onnx_model
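As documented above, *target_opset* may be a plain number or a dictionary mapping each domain to an opset version, the empty string standing for the main ONNX domain. A hedged usage sketch through the public API (the estimator and the version numbers are only illustrative):

::

    import numpy as np
    from sklearn.tree import DecisionTreeClassifier
    from skl2onnx import to_onnx

    X = np.random.rand(20, 3).astype(np.float32)
    y = (X[:, 0] > 0.5).astype(np.int64)
    clf = DecisionTreeClassifier(max_depth=3).fit(X, y)

    # '' is the main ONNX domain, 'ai.onnx.ml' the ML operator domain
    onx = to_onnx(clf, X, target_opset={'': 15, 'ai.onnx.ml': 2})
    print([(op.domain, op.version) for op in onx.opset_import])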
+ + +def _update_domain_version(container, onnx_model, verbose=0): + # Merge operator sets for the same domain, the largest version + # number would be kept + purified_operator_set = dict() + for op_domain, op_version in container.node_domain_version_pair_sets: + if op_domain not in purified_operator_set: + purified_operator_set[op_domain] = op_version + else: + purified_operator_set[op_domain] = max( + purified_operator_set[op_domain], op_version) + + # Fill operator sets + i = 0 + for op_domain, op_version in purified_operator_set.items(): + if op_version is None: + continue + if i == 0 and len(onnx_model.opset_import) == 1: + # Overwrite the default operator set created by + # make_model(...) + op_set = onnx_model.opset_import[0] + else: + # Just create one ONNX element in opset_import + op_set = onnx_model.opset_import.add() + if verbose > 0: + print('[_update_domain_version] +opset %d: name=%r, version=%s' % ( + i, op_domain, op_version)) + op_set.domain = op_domain + if op_set != '': + max_supported = get_default_opset_for_domain(op_domain) + if max_supported is not None and max_supported < op_version: + raise RuntimeError( + "The model is using version %d of domain %r not supported " + "yet by this library. You need to specify " + "target_opset={%r: %r}." % ( + op_version, op_domain, op_domain, max_supported)) + op_set.version = op_version + + i += 1 + if container.target_opset_any_domain(op_domain) < op_version: + raise RuntimeError( + 'The specified opset %d is too low to convert ' + 'this model, which requires at least opset ' + '%d.' % ( + container.target_opset_any_domain(op_domain), + op_version)) + return '' in purified_operator_set + + +def _get_main_opset_version(model): + """ + Returns the main opset version. + """ + mld = None + for op in model.opset_import: + if op.domain == '': + return op.version + if op.domain == "ai.onnx.ml": + mld = op.version + if mld is not None: + return OPSET_ML_TO_OPSET.get(mld, None) + return None +
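``_get_main_opset_version`` returns the version declared for the main ONNX domain, falling back on a translation of the ``ai.onnx.ml`` version when only that domain is declared. A simplified sketch of reading those declarations from any ``ModelProto`` (it omits the ``OPSET_ML_TO_OPSET`` translation table and is not the library's implementation):

::

    import onnx

    def main_opset(model: onnx.ModelProto):
        # prefer the '' (main) domain; otherwise report the ai.onnx.ml version
        ml_version = None
        for op in model.opset_import:
            if op.domain == '':
                return op.version
            if op.domain == 'ai.onnx.ml':
                ml_version = op.version
        return ml_version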
\ No newline at end of file
diff --git a/_modules/skl2onnx/convert.html b/_modules/skl2onnx/convert.html
index 62a25baaf..db19b56e5 100644
--- a/_modules/skl2onnx/convert.html
+++ b/_modules/skl2onnx/convert.html
@@ -1,515 +1,611 @@
-  skl2onnx.convert — sklearn-onnx 1.11.2 documentation

Source code for skl2onnx.convert

-# SPDX-License-Identifier: Apache-2.0
-
-import warnings
-from uuid import uuid4
-from .proto import get_latest_tested_opset_version
-from .common._topology import convert_topology
-from .common.utils_sklearn import _process_options
-from ._parse import parse_sklearn_model
-
-# Invoke the registration of all our converters and shape calculators.
-from . import shape_calculators  # noqa
-from . import operator_converters  # noqa
-
-
-
[docs]def convert_sklearn(model, name=None, initial_types=None, doc_string='', - target_opset=None, custom_conversion_functions=None, - custom_shape_calculators=None, - custom_parsers=None, options=None, - intermediate=False, - white_op=None, black_op=None, final_types=None, - dtype=None, naming=None, verbose=0): - """ - This function produces an equivalent - ONNX model of the given scikit-learn model. - The supported converters is returned by function - :func:`supported_converters <skl2onnx.supported_converters>`. - - For pipeline conversion, user needs to make sure each component - is one of our supported items. - This function converts the specified *scikit-learn* model - into its *ONNX* counterpart. - Note that for all conversions, initial types are required. - *ONNX* model name can also be specified. - - :param model: A scikit-learn model - :param initial_types: a python list. - Each element is a tuple of a variable name - and a type defined in `data_types.py` - :param name: The name of the graph (type: GraphProto) - in the produced ONNX model (type: ModelProto) - :param doc_string: A string attached onto the produced ONNX model - :param target_opset: number, for example, 7 for - ONNX 1.2, and 8 for ONNX 1.3, - if value is not specified, the function will - choose the latest tested opset - (see :py:func:`skl2onnx.get_latest_tested_opset_version`) - :param custom_conversion_functions: a dictionary for - specifying the user customized conversion function, - it takes precedence over registered converters - :param custom_shape_calculators: a dictionary for - specifying the user customized shape calculator - it takes precedence over registered shape calculators. - :param custom_parsers: parsers determines which outputs - is expected for which particular task, - default parsers are defined for classifiers, - regressors, pipeline but they can be rewritten, - *custom_parsers* is a dictionary - ``{ type: fct_parser(scope, model, inputs, custom_parsers=None) }`` - :param options: specific options given to converters - (see :ref:`l-conv-options`) - :param intermediate: if True, the function returns the - converted model and the instance of :class:`Topology` used, - it returns the converted model otherwise - :param white_op: white list of ONNX nodes allowed - while converting a pipeline, - if empty, all are allowed - :param black_op: black list of ONNX nodes - allowed while converting a pipeline, - if empty, none are blacklisted - :param final_types: a python list. Works the same way as initial_types - but not mandatory, it is used to overwrites the type - (if type is not None) and the name of every output. - :param dtype: removed in version 1.7.5, dtype is - now inferred from input types, - converters may add operators Cast to switch - to double when it is necessary - :param naming: the user may want to change the way intermediate - are named, this parameter can be a string (a prefix) or a - function, which signature is the following: - `get_name(name, existing_names)`, the library will then - check this name is unique and modify it if not - :param verbose: display progress while converting a model - :return: An ONNX model (type: ModelProto) which is - equivalent to the input scikit-learn model - - Example of *initial_types*: - Assume that the specified *scikit-learn* model takes - a heterogeneous list as its input. - If the first 5 elements are floats and the last 10 elements are integers, - we need to specify initial types as below. 
The [None] in - [None, 5] indicates the batch size here is unknown. - - :: - - from skl2onnx.common.data_types import FloatTensorType, Int64TensorType - initial_type = [('float_input', FloatTensorType([None, 5])), - ('int64_input', Int64TensorType([None, 10]))] - - .. note:: - - If a pipeline includes an instance of - `ColumnTransformer <https://scikit-learn.org/stable/modules/ - generated/sklearn.compose.ColumnTransformer.html>`_, - *scikit-learn* allow the user to specify columns by names. - This option is not supported - by *sklearn-onnx* as features names could be different - in input data and the ONNX graph - (defined by parameter *initial_types*), only integers are supported. - - .. _l-conv-options: - - Converters options - ++++++++++++++++++ - - Some ONNX operators exposes parameters *sklearn-onnx* cannot - guess from the raw model. Some default values are usually suggested - but the users may have to manually overwrite them. This need - is not obvious to do when a model is included in a pipeline. - That's why these options can be given to function *convert_sklearn* - as a dictionary ``{model_type: parameters in a dictionary}`` or - ``{model_id: parameters in a dictionary}``. - Option *sep* is used to specify the delimiters between two words - when the ONNX graph needs to tokenize a string. - The default value is short and may not include all - the necessary values. It can be overwritten as: - - :: - - extra = {TfidfVectorizer: {"separators": [' ', '[.]', '\\\\?', - ',', ';', ':', '\\\\!', '\\\\(', '\\\\)']}} - model_onnx = convert_sklearn( - model, "tfidf", - initial_types=[("input", StringTensorType([None, 1]))], - options=extra) - - But if a pipeline contains two model of the same class, - it is possible to distinguish between the two with function *id*: - - :: - - extra = {id(model): {"separators": [' ', '.', '\\\\?', ',', ';', - ':', '\\\\!', '\\\\(', '\\\\)']}} - model_onnx = convert_sklearn( - pipeline, "pipeline-with-2-tfidf", - initial_types=[("input", StringTensorType([None, 1]))], - options=extra) - - It is used in example :ref:`l-example-tfidfvectorizer`. - - .. versionchanged:: 1.10.0 - Parameter *naming* was added. - """ - if initial_types is None: - if hasattr(model, 'infer_initial_types'): - initial_types = model.infer_initial_types() - else: - raise ValueError('Initial types are required. See usage of ' - 'convert(...) in skl2onnx.convert for details') - - if name is None: - name = str(uuid4().hex) - if dtype is not None: - warnings.warn( - "Parameter dtype is no longer supported. " - "It will be removed in 1.9.0.", - DeprecationWarning) - - target_opset = (target_opset - if target_opset else get_latest_tested_opset_version()) - # Parse scikit-learn model as our internal data structure - # (i.e., Topology) - if verbose >= 1: - print("[convert_sklearn] parse_sklearn_model") - topology = parse_sklearn_model( - model, initial_types, target_opset, custom_conversion_functions, - custom_shape_calculators, custom_parsers, options=options, - white_op=white_op, black_op=black_op, - final_types=final_types, naming=naming) - - # Convert our Topology object into ONNX. The outcome is an ONNX model. 
- options = _process_options(model, options) - if verbose >= 1: - print("[convert_sklearn] convert_topology") - onnx_model = convert_topology( - topology, name, doc_string, target_opset, options=options, - remove_identity=not intermediate, verbose=verbose) - if verbose >= 1: - print("[convert_sklearn] end") - if verbose >= 2: - scope = topology.scopes[0] - print("---INPUTS---") - for inp in scope.input_variables: - print(" %r" % inp) - print("---OUTPUTS---") - for inp in scope.output_variables: - print(" %r" % inp) - print("---VARIABLES---") - for k, v in sorted(scope.variables.items()): - print(" %r: is.fed=%r is_leaf=%r - %r" % ( - k, v.is_fed, v.is_leaf, v)) - print("---OPERATORS---") - for k, v in sorted(scope.operators.items()): - print(" %r: is.evaluated=%r - %r" % ( - k, v.is_evaluated, v)) - - return (onnx_model, topology) if intermediate else onnx_model
- - -
[docs]def to_onnx(model, X=None, name=None, initial_types=None, - target_opset=None, options=None, - white_op=None, black_op=None, final_types=None, - dtype=None, naming=None, verbose=0): - """ - Calls :func:`convert_sklearn` with simplified parameters. - - :param model: model to convert - :param X: training set, can be None, it is used to infered the - input types (*initial_types*) - :param initial_types: if X is None, then *initial_types* must be - defined - :param target_opset: conversion with a specific target opset - :param options: specific options given to converters - (see :ref:`l-conv-options`) - :param name: name of the model - :param white_op: white list of ONNX nodes allowed - while converting a pipeline, if empty, all are allowed - :param black_op: black list of ONNX nodes allowed - while converting a pipeline, if empty, none are blacklisted - :param final_types: a python list. Works the same way as initial_types - but not mandatory, it is used to overwrites the type - (if type is not None) and the name of every output. - :param dtype: removed in version 1.7.5, dtype is now inferred from - input types, converters may add operators Cast to switch to - double when it is necessary - :param naming: the user may want to change the way intermediate - are named, this parameter can be a string (a prefix) or a - function, which signature is the following: - `get_name(name, existing_names)`, the library will then - check this name is unique and modify it if not - :param verbose: display progress while converting a model - :return: converted model - - This function checks if the model inherits from class - :class:`OnnxOperatorMixin`, it calls method *to_onnx* - in that case otherwise it calls :func:`convert_sklearn`. - - .. versionchanged:: 1.10.0 - Parameter *naming* was added. - """ - from .algebra.onnx_operator_mixin import OnnxOperatorMixin - from .algebra.type_helper import guess_initial_types - - if isinstance(model, OnnxOperatorMixin): - if options is not None: - raise NotImplementedError( - "options not yet implemented for OnnxOperatorMixin.") - return model.to_onnx(X=X, name=name, target_opset=target_opset) - if name is None: - name = "ONNX(%s)" % model.__class__.__name__ - initial_types = guess_initial_types(X, initial_types) - if verbose >= 1: - print("[to_onnx] initial_types=%r" % initial_types) - return convert_sklearn(model, initial_types=initial_types, - target_opset=target_opset, - name=name, options=options, - white_op=white_op, black_op=black_op, - final_types=final_types, dtype=dtype, - verbose=verbose, naming=naming)
- - -def wrap_as_onnx_mixin(model, target_opset=None): - """ - Combines a *scikit-learn* class with :class:`OnnxOperatorMixin` - which produces a new object which combines *scikit-learn* API - and *OnnxOperatorMixin* API. - """ - from .algebra.sklearn_ops import find_class - cl = find_class(model.__class__) - if "automation" in str(cl): - raise RuntimeError("Wrong class name '{}'.".format(cl)) - state = model.__getstate__() - obj = object.__new__(cl) - obj.__setstate__(state) - obj.op_version = target_opset - return obj -
+  skl2onnx.convert - sklearn-onnx 1.14.0 documentation

Source code for skl2onnx.convert

+# SPDX-License-Identifier: Apache-2.0
+
+import warnings
+from uuid import uuid4
+from .proto import get_latest_tested_opset_version
+from .common._topology import convert_topology
+from .common.utils_sklearn import _process_options
+from ._parse import parse_sklearn_model
+
+# Invoke the registration of all our converters and shape calculators.
+from . import shape_calculators  # noqa
+from . import operator_converters  # noqa
+
+
+
[docs]def convert_sklearn(model, name=None, initial_types=None, doc_string='', + target_opset=None, custom_conversion_functions=None, + custom_shape_calculators=None, + custom_parsers=None, options=None, + intermediate=False, + white_op=None, black_op=None, final_types=None, + dtype=None, naming=None, model_optim=True, + verbose=0): + """ + This function produces an equivalent + ONNX model of the given scikit-learn model. + The supported converters is returned by function + :func:`supported_converters <skl2onnx.supported_converters>`. + + For pipeline conversion, user needs to make sure each component + is one of our supported items. + This function converts the specified *scikit-learn* model + into its *ONNX* counterpart. + Note that for all conversions, initial types are required. + *ONNX* model name can also be specified. + + :param model: A scikit-learn model + :param initial_types: a python list. + Each element is a tuple of a variable name + and a type defined in `data_types.py` + :param name: The name of the graph (type: GraphProto) + in the produced ONNX model (type: ModelProto) + :param doc_string: A string attached onto the produced ONNX model + :param target_opset: number, for example, 7 for + ONNX 1.2, and 8 for ONNX 1.3, + if value is not specified, the function will + choose the latest tested opset + (see :py:func:`skl2onnx.get_latest_tested_opset_version`) + :param custom_conversion_functions: a dictionary for + specifying the user customized conversion function, + it takes precedence over registered converters + :param custom_shape_calculators: a dictionary for + specifying the user customized shape calculator + it takes precedence over registered shape calculators. + :param custom_parsers: parsers determines which outputs + is expected for which particular task, + default parsers are defined for classifiers, + regressors, pipeline but they can be rewritten, + *custom_parsers* is a dictionary + ``{ type: fct_parser(scope, model, inputs, custom_parsers=None) }`` + :param options: specific options given to converters + (see :ref:`l-conv-options`) + :param intermediate: if True, the function returns the + converted model and the instance of :class:`Topology` used, + it returns the converted model otherwise + :param white_op: white list of ONNX nodes allowed + while converting a pipeline, + if empty, all are allowed + :param black_op: black list of ONNX nodes + allowed while converting a pipeline, + if empty, none are blacklisted + :param final_types: a python list. Works the same way as initial_types + but not mandatory, it is used to overwrites the type + (if type is not None) and the name of every output. + :param dtype: removed in version 1.7.5, dtype is + now inferred from input types, + converters may add operators Cast to switch + to double when it is necessary + :param naming: the user may want to change the way intermediate + are named, this parameter can be a string (a prefix) or a + function, which signature is the following: + `get_name(name, existing_names)`, the library will then + check this name is unique and modify it if not + :param model_optim: enable or disable model optimisation + after the model was converted into onnx, it reduces the number + of identity nodes + :param verbose: display progress while converting a model + :return: An ONNX model (type: ModelProto) which is + equivalent to the input scikit-learn model + + Example of *initial_types*: + Assume that the specified *scikit-learn* model takes + a heterogeneous list as its input. 
+ If the first 5 elements are floats and the last 10 elements are integers, + we need to specify initial types as below. The [None] in + [None, 5] indicates the batch size here is unknown. + + :: + + from skl2onnx.common.data_types import FloatTensorType, Int64TensorType + initial_type = [('float_input', FloatTensorType([None, 5])), + ('int64_input', Int64TensorType([None, 10]))] + + .. note:: + + If a pipeline includes an instance of + `ColumnTransformer <https://scikit-learn.org/stable/modules/ + generated/sklearn.compose.ColumnTransformer.html>`_, + *scikit-learn* allow the user to specify columns by names. + This option is not supported + by *sklearn-onnx* as features names could be different + in input data and the ONNX graph + (defined by parameter *initial_types*), only integers are supported. + + .. _l-conv-options: + + Converters options + ++++++++++++++++++ + + Some ONNX operators exposes parameters *sklearn-onnx* cannot + guess from the raw model. Some default values are usually suggested + but the users may have to manually overwrite them. This need + is not obvious to do when a model is included in a pipeline. + That's why these options can be given to function *convert_sklearn* + as a dictionary ``{model_type: parameters in a dictionary}`` or + ``{model_id: parameters in a dictionary}``. + Option *sep* is used to specify the delimiters between two words + when the ONNX graph needs to tokenize a string. + The default value is short and may not include all + the necessary values. It can be overwritten as: + + :: + + extra = {TfidfVectorizer: {"separators": [' ', '[.]', '\\\\?', + ',', ';', ':', '\\\\!', '\\\\(', '\\\\)']}} + model_onnx = convert_sklearn( + model, "tfidf", + initial_types=[("input", StringTensorType([None, 1]))], + options=extra) + + But if a pipeline contains two model of the same class, + it is possible to distinguish between the two with function *id*: + + :: + + extra = {id(model): {"separators": [' ', '.', '\\\\?', ',', ';', + ':', '\\\\!', '\\\\(', '\\\\)']}} + model_onnx = convert_sklearn( + pipeline, "pipeline-with-2-tfidf", + initial_types=[("input", StringTensorType([None, 1]))], + options=extra) + + It is used in example :ref:`l-example-tfidfvectorizer`. + + .. versionchanged:: 1.10.0 + Parameter *naming* was added. + """ + if initial_types is None: + if hasattr(model, 'infer_initial_types'): + initial_types = model.infer_initial_types() + else: + raise ValueError('Initial types are required. See usage of ' + 'convert(...) in skl2onnx.convert for details') + + if name is None: + name = str(uuid4().hex) + if dtype is not None: + warnings.warn( + "Parameter dtype is no longer supported. " + "It will be removed in 1.9.0.", + DeprecationWarning) + + target_opset = (target_opset + if target_opset else get_latest_tested_opset_version()) + # Parse scikit-learn model as our internal data structure + # (i.e., Topology) + if verbose >= 1: + print("[convert_sklearn] parse_sklearn_model") + topology = parse_sklearn_model( + model, initial_types, target_opset, custom_conversion_functions, + custom_shape_calculators, custom_parsers, options=options, + white_op=white_op, black_op=black_op, + final_types=final_types, naming=naming) + + # Convert our Topology object into ONNX. The outcome is an ONNX model. 
+ options = _process_options(model, options) + if verbose >= 1: + print("[convert_sklearn] convert_topology") + onnx_model = convert_topology( + topology, name, doc_string, target_opset, options=options, + remove_identity=model_optim and not intermediate, verbose=verbose) + if verbose >= 1: + print("[convert_sklearn] end") + if verbose >= 2: + scope = topology.scopes[0] + print("---INPUTS---") + for inp in scope.input_variables: + print(" %r" % inp) + print("---OUTPUTS---") + for inp in scope.output_variables: + print(" %r" % inp) + print("---VARIABLES---") + for k, v in sorted(scope.variables.items()): + print(" %r: is.fed=%r is_leaf=%r - %r" % ( + k, v.is_fed, v.is_leaf, v)) + print("---OPERATORS---") + for k, v in sorted(scope.operators.items()): + print(" %r: is.evaluated=%r - %r" % ( + k, v.is_evaluated, v)) + + return (onnx_model, topology) if intermediate else onnx_model
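A minimal end-to-end use of ``convert_sklearn`` with explicit *initial_types*, as described in the docstring above (the estimator, shapes and file name are illustrative):

::

    import numpy as np
    from sklearn.linear_model import LogisticRegression
    from skl2onnx import convert_sklearn
    from skl2onnx.common.data_types import FloatTensorType

    X = np.random.rand(30, 5).astype(np.float32)
    y = (X.sum(axis=1) > 2.5).astype(np.int64)
    model = LogisticRegression().fit(X, y)

    onx = convert_sklearn(
        model, name="logreg",
        initial_types=[('float_input', FloatTensorType([None, 5]))])
    with open("logreg.onnx", "wb") as f:
        f.write(onx.SerializeToString())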
+ + +
[docs]def to_onnx(model, X=None, name=None, initial_types=None, + target_opset=None, options=None, + white_op=None, black_op=None, final_types=None, + dtype=None, naming=None, model_optim=True, verbose=0): + """ + Calls :func:`convert_sklearn` with simplified parameters. + + :param model: model to convert + :param X: training set, can be None, it is used to infered the + input types (*initial_types*) + :param initial_types: if X is None, then *initial_types* must be + defined + :param target_opset: conversion with a specific target opset + :param options: specific options given to converters + (see :ref:`l-conv-options`) + :param name: name of the model + :param white_op: white list of ONNX nodes allowed + while converting a pipeline, if empty, all are allowed + :param black_op: black list of ONNX nodes allowed + while converting a pipeline, if empty, none are blacklisted + :param final_types: a python list. Works the same way as initial_types + but not mandatory, it is used to overwrites the type + (if type is not None) and the name of every output. + :param dtype: removed in version 1.7.5, dtype is now inferred from + input types, converters may add operators Cast to switch to + double when it is necessary + :param naming: the user may want to change the way intermediate + are named, this parameter can be a string (a prefix) or a + function, which signature is the following: + `get_name(name, existing_names)`, the library will then + check this name is unique and modify it if not + :param model_optim: enable or disable model optimisation + after the model was converted into onnx, it reduces the number + of identity nodes + :param verbose: display progress while converting a model + :return: converted model + + This function checks if the model inherits from class + :class:`OnnxOperatorMixin`, it calls method *to_onnx* + in that case otherwise it calls :func:`convert_sklearn`. + + .. versionchanged:: 1.10.0 + Parameter *naming* was added. + """ + from .algebra.onnx_operator_mixin import OnnxOperatorMixin + from .algebra.type_helper import guess_initial_types + + if isinstance(model, OnnxOperatorMixin): + if options is not None: + raise NotImplementedError( + "options not yet implemented for OnnxOperatorMixin.") + return model.to_onnx(X=X, name=name, target_opset=target_opset) + if name is None: + name = "ONNX(%s)" % model.__class__.__name__ + initial_types = guess_initial_types(X, initial_types) + if verbose >= 1: + print("[to_onnx] initial_types=%r" % initial_types) + return convert_sklearn(model, initial_types=initial_types, + target_opset=target_opset, + name=name, options=options, + white_op=white_op, black_op=black_op, + final_types=final_types, dtype=dtype, + verbose=verbose, naming=naming, + model_optim=model_optim)
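``to_onnx`` infers *initial_types* from the training set *X* when it is given, which keeps the call short. A small sketch (the estimator choice is illustrative):

::

    import numpy as np
    from sklearn.cluster import KMeans
    from skl2onnx import to_onnx

    X = np.random.rand(40, 4).astype(np.float32)
    km = KMeans(n_clusters=3, n_init=3).fit(X)

    # the input type and shape ([None, 4], float32) are inferred from X
    onx = to_onnx(km, X)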
+ + +def wrap_as_onnx_mixin(model, target_opset=None): + """ + Combines a *scikit-learn* class with :class:`OnnxOperatorMixin` + which produces a new object which combines *scikit-learn* API + and *OnnxOperatorMixin* API. + """ + from .algebra.sklearn_ops import find_class + cl = find_class(model.__class__) + if "automation" in str(cl): + raise RuntimeError("Wrong class name '{}'.".format(cl)) + state = model.__getstate__() + obj = object.__new__(cl) + obj.__setstate__(state) + obj.op_version = target_opset + return obj +
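A hedged sketch of ``wrap_as_onnx_mixin``; whether a given estimator class can be wrapped depends on ``skl2onnx.algebra.sklearn_ops.find_class``, and the opset value below is illustrative:

::

    from sklearn.cluster import KMeans
    from skl2onnx import wrap_as_onnx_mixin

    # the wrapped object keeps the scikit-learn API and also exposes the
    # OnnxOperatorMixin API (assuming KMeans is covered by find_class)
    model = wrap_as_onnx_mixin(KMeans(n_clusters=2), target_opset=12)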
\ No newline at end of file
diff --git a/_modules/skl2onnx/helpers/onnx_helper.html b/_modules/skl2onnx/helpers/onnx_helper.html
index 1fbc228d0..4f639bcf6 100644
--- a/_modules/skl2onnx/helpers/onnx_helper.html
+++ b/_modules/skl2onnx/helpers/onnx_helper.html
@@ -1,689 +1,778 @@
-  skl2onnx.helpers.onnx_helper — sklearn-onnx 1.11.2 documentation

Source code for skl2onnx.helpers.onnx_helper

-# SPDX-License-Identifier: Apache-2.0
-
-from logging import getLogger
-from io import BytesIO
-import numpy as np
-import onnx  # noqa
-from onnx import shape_inference, TensorProto
-from onnx.numpy_helper import from_array, to_array
-from onnx.helper import make_tensor
-from ..proto.onnx_helper_modified import (
-    make_node, make_tensor_value_info, make_graph,
-    make_model, ValueInfoProto
-)
-from ..proto import get_latest_tested_opset_version
-from onnx import onnx_pb as onnx_proto
-from ..common._topology import Variable
-
-
-
[docs]def load_onnx_model(onnx_file_or_bytes): - """ - Loads an *ONNX* file. - - :param onnx_file_or_bytes: *ONNX* file or bytes - :return: *ONNX* model - """ - if isinstance(onnx_file_or_bytes, str): - with open(onnx_file_or_bytes, "rb") as f: - return onnx.load(f) - elif hasattr(onnx_file_or_bytes, 'read'): - return onnx.load(onnx_file_or_bytes) - else: - b = BytesIO(onnx_file_or_bytes) - return onnx.load(b)
- - -
[docs]def save_onnx_model(model, filename=None): - """ - Saves a model as a file or bytes. - - :param model: *ONNX* model - :param filename: filename or None to return bytes - :return: bytes - """ - content = model.SerializeToString() - if filename is not None: - if hasattr(filename, 'write'): - filename.write(content) - else: - with open(filename, "wb") as f: - f.write(content) - return content
- - -
[docs]def enumerate_model_node_outputs(model, add_node=False): - """ - Enumerates all the nodes of a model. - - :param model: ONNX graph - :param add_node: if False, the function enumerates - all output names from every node, otherwise, it - enumerates tuple (output name, node) - :return: enumerator - """ - if not hasattr(model, "graph"): - raise TypeError("Parameter model is not an ONNX model but " - "{}".format(type(model))) - for node in model.graph.node: - for out in node.output: - yield (out, node) if add_node else out
- - -def enumerate_model_initializers(model, add_node=False): - """ - Enumerates all the initializers of a model. - - :param model: ONNX graph - :param add_node: if False, the function enumerates - all output names from every node, otherwise, it - enumerates tuple (output name, node) - :return: enumerator - """ - for node in model.graph.initializer: - yield (node.name, node) if add_node else node.name - - -
[docs]def select_model_inputs_outputs(model, outputs=None, inputs=None): - """ - Takes a model and changes its outputs. - - :param model: *ONNX* model - :param inputs: new inputs - :param outputs: new outputs - :return: modified model - - The function removes unneeded files. - """ - if inputs is not None: - raise NotImplementedError("Parameter inputs cannot be empty.") - if outputs is None: - raise RuntimeError("Parameter outputs cannot be None.") - if not isinstance(outputs, list): - outputs = [outputs] - - mark_var = {} - for out in enumerate_model_node_outputs(model): - mark_var[out] = 0 - for inp in model.graph.input: - mark_var[inp.name] = 0 - for out in outputs: - if out not in mark_var: - raise ValueError("Output '{}' not found in model.".format(out)) - mark_var[out] = 1 - - nodes = model.graph.node[::-1] - mark_op = {} - for node in nodes: - mark_op[node.name] = 0 - - # We mark all the nodes we need to keep. - nb = 1 - while nb > 0: - nb = 0 - for node in nodes: - if mark_op[node.name] == 1: - continue - mod = False - for out in node.output: - if mark_var[out] == 1: - mark_op[node.name] = 1 - mod = True - break - if not mod: - continue - - nb += 1 - for inp in node.input: - if mark_var.get(inp, 0) == 1: - continue - mark_var[inp] = 1 - nb += 1 - - # All nodes verifies mark_op[node.name] == 1 - keep_nodes = [node for node in nodes if mark_op[node.name] == 1] - - var_out = [] - for out in outputs: - value_info = ValueInfoProto() - value_info.name = out - var_out.append(value_info) - graph = make_graph(keep_nodes, model.graph.name, model.graph.input, - var_out, model.graph.initializer) - onnx_model = make_model(graph) - onnx_model.ir_version = model.ir_version - onnx_model.producer_name = model.producer_name - onnx_model.producer_version = model.producer_version - onnx_model.domain = model.domain - onnx_model.model_version = model.model_version - onnx_model.doc_string = model.doc_string - if len(model.metadata_props) > 0: - values = {p.key: p.value for p in model.metadata_props} - onnx.helper.set_model_props(onnx_model, values) - - if len(onnx_model.graph.input) != len(model.graph.input): - raise RuntimeError("Input mismatch {} != {}".format( - len(onnx_model.input), len(model.input))) - - # fix opset import - del onnx_model.opset_import[:] - for oimp in model.opset_import: - op_set = onnx_model.opset_import.add() - op_set.domain = oimp.domain - op_set.version = oimp.version - return onnx_model
- - -def infer_outputs(op_type, inputs, outputs=None, initializer=None, - target_opset=None, **atts): - """ - Infers outputs type and shapes given an ONNX operator. - """ - logger = getLogger('skl2onnx') - logger.debug( - '[infer_outputs] op_type=%r inputs=%r outputs=%r', - op_type, [x.name for x in inputs], outputs) - if isinstance(op_type, str): - required_outputs = [] - if outputs: - for o in outputs: - if hasattr(o, 'onnx_name'): - required_outputs.append(o.onnx_name) - elif isinstance(o, str): - required_outputs.append(o) - else: - raise TypeError("Unable to require output {}.".format(o)) - node = make_node(op_type, [i.onnx_name for i in inputs], - required_outputs, **atts) - node = [node] - elif hasattr(op_type, 'nodes'): - node = op_type.nodes - else: - raise RuntimeError("Unable to build ONNX nodes from type {}.".format( - type(op_type))) - - input_init = inputs.copy() - if initializer: - input_init.extend(initializer) - onnx_inputs = [] - for input in input_init: - if isinstance(input, Variable): - onnx_type = input.type.to_onnx_type() - tensor_type = onnx_type.tensor_type - shape = [tensor_type.shape.dim[i].dim_value - for i in range(len(tensor_type.shape.dim))] - inp = make_tensor_value_info(input.onnx_name, - tensor_type.elem_type, - tuple(shape)) - onnx_inputs.append(inp) - elif isinstance(input, onnx.TensorProto): - v = make_tensor_value_info( - input.name, input.data_type.real, - list(d for d in input.dims)) - onnx_inputs.append(v) - elif isinstance(input, onnx.AttributeProto): - value_info = ValueInfoProto() - value_info.name = input.name - onnx_type = onnx_proto.TypeProto() - onnx_type.tensor_type.elem_type = input.type - value_info.type.CopyFrom(onnx_type) - onnx_inputs.append(value_info) - else: - onnx_inputs.append(input) - - graph = make_graph(node, 'infer_shapes', - onnx_inputs, []) - original_model = make_model(graph, producer_name='skl2onnx') - domains = {} - for n in node: - domains[n.domain] = max(domains.get(n.domain, 1), - getattr(n, 'op_version', 1)) - for i, (k, v) in enumerate(domains.items()): - if i == 0 and len(original_model.opset_import) == 1: - op_set = original_model.opset_import[0] - else: - op_set = original_model.opset_import.add() - op_set.domain = k - if target_opset: - if isinstance(target_opset, dict): - op_set.version = target_opset.get( - k, get_latest_tested_opset_version()) - else: - op_set.version = target_opset - else: - op_set.version = get_latest_tested_opset_version() - - try: - inferred_model = shape_inference.infer_shapes(original_model) - except RuntimeError as e: - raise RuntimeError( - "Unable to infer shape of node '{}'\n{}".format( - op_type, original_model)) from e - all_shapes = Variable.from_pb(inferred_model.graph.value_info) - used = set() - for node in graph.node: - for name in node.input: - used.add(name) - shapes = [shape for shape in all_shapes if shape.onnx_name not in used] - if len(shapes) == 0: - raise RuntimeError("Shape inference fails.\n" - "*Inputs*\n{}\n*Model*\n{}'".format( - onnx_inputs, original_model)) - logger.debug('[infer_outputs] shapes=%r', shapes) - return shapes - - -def change_onnx_domain(model, ops): - """ - Takes a model and changes its outputs. - - :param model: *ONNX* model - :param ops: dictionary { optype: ('optype', 'new domain') } - :return: modified model - - The function removes unneeded files. 
- """ - nodes = model.graph.node - for node in nodes: - rep = ops.get(node.op_type, None) - if rep is None: - continue - node.op_type = rep[0] - node.domain = rep[1] - - graph = make_graph(nodes, model.graph.name, model.graph.input, - model.graph.output, model.graph.initializer) - onnx_model = make_model(graph) - onnx_model.ir_version = model.ir_version - onnx_model.producer_name = model.producer_name - onnx_model.producer_version = model.producer_version - onnx_model.domain = model.domain - onnx_model.model_version = model.model_version - onnx_model.doc_string = model.doc_string - if len(model.metadata_props) > 0: - values = {p.key: p.value for p in model.metadata_props} - onnx.helper.set_model_props(onnx_model, values) - - if len(onnx_model.graph.input) != len(model.graph.input): - raise RuntimeError("Input mismatch {} != {}".format( - len(onnx_model.input), len(model.input))) - - # fix opset import - domain_set = set() - has_domain = False - del onnx_model.opset_import[:] - for oimp in model.opset_import: - op_set = onnx_model.opset_import.add() - op_set.domain = oimp.domain - op_set.version = oimp.version - domain_set.add(oimp.domain) - if not has_domain: - has_domain = oimp.domain in domain_set - for v in ops.values(): - if v[1] not in domain_set: - op_set = onnx_model.opset_import.add() - op_set.domain = v[1] - op_set.version = 1 - return onnx_model - - -def add_output_initializer(model_onnx, name, value, suffix='_init'): - """ - Add a constant and link it to one output. - It allows the user to store arrays into the graph - and retrieve them when using it. - The initializer is named `name + suffix`, the output - is named `name`. - - :param model_onnx: ONNX graph - :param name: initializer name (initializer name, output name) - :param value: array to store - :param suffix: name of the initializer - :return: new model - - It is possible to add multiple constant by using list: - ``add_output_initializer(model_onnx, ['name1', 'name2'], [v1, v2])``. - """ - if isinstance(name, str): - name_list = [name] - value_list = [value] - else: - name_list = name - value_list = value - - if len(name_list) != len(value_list): - raise ValueError( - "Mismatched names and values. There are %d names and %d values." - "" % (len(name_list), len(value_list))) - - nodes = list(model_onnx.graph.node) - inits = list(model_onnx.graph.initializer) - outputs = list(model_onnx.graph.output) - - for name, value in zip(name_list, value_list): - name_output = name - name_init = name + suffix - names = set(i.name for i in model_onnx.graph.initializer) - if name_output in names or name_init in names: - raise ValueError( - "Names %r or %r is already taken by an initializer: %r." % ( - name_output, name_init, ", ".join(sorted(names)))) - names = set(i.name for i in model_onnx.graph.output) - if name_output in names or name_init in names: - raise ValueError( - "Names %r or %r is already taken by an output: %r." % ( - name_output, name_init, ", ".join(sorted(names)))) - names = set(i.name for i in model_onnx.graph.input) - if name_output in names or name_init in names: - raise ValueError( - "Names %r or %r is already taken by an output: %r." 
% ( - name_output, name_init, ", ".join(sorted(names)))) - - try: - cst = from_array(value, name=name_init) - except RuntimeError as e: - st = str(value.dtype).lower() - if st.startswith('u') or st.startswith("<u"): - cst_value = np.array([s.encode('utf-8') for s in value]) - cst = make_tensor( - name_init, data_type=TensorProto.STRING, - dims=value.shape, vals=list(cst_value)) - else: - raise e - - inits.append(cst) - - outputs.append(make_tensor_value_info( - name_output, cst.data_type, cst.dims)) - - nodes.append(make_node('Identity', [name_init], [name_output])) - - graph = make_graph( - nodes, model_onnx.graph.name, model_onnx.graph.input, - outputs, inits) - - onnx_model = make_model(graph) - onnx_model.ir_version = model_onnx.ir_version - onnx_model.producer_name = model_onnx.producer_name - onnx_model.producer_version = model_onnx.producer_version - onnx_model.domain = model_onnx.domain - onnx_model.model_version = model_onnx.model_version - onnx_model.doc_string = model_onnx.doc_string - if len(model_onnx.metadata_props) > 0: - values = {p.key: p.value for p in model_onnx.metadata_props} - onnx.helper.set_model_props(onnx_model, values) - - if len(onnx_model.graph.input) != len(model_onnx.graph.input): - raise RuntimeError("Input mismatch {} != {}".format( - len(onnx_model.input), len(model_onnx.input))) - - # fix opset import - del onnx_model.opset_import[:] - for oimp in model_onnx.opset_import: - op_set = onnx_model.opset_import.add() - op_set.domain = oimp.domain - op_set.version = oimp.version - return onnx_model - - -def get_initializers(model_onnx): - """ - Retrieves the list of initializers in a model in a - dictionary `{ name: value }`. - """ - res = {} - for init in model_onnx.graph.initializer: - res[init.name] = to_array(init) - return res - - -def update_onnx_initializers(model_onnx, new_inits): - """ - Updates initializer in a ONNX model. - - :param model_onnx: ONNX model - :param new_inits: new initializers - :return: list of updated initializers - """ - updated = [] - replace_weights = [] - replace_indices = [] - for i, w in enumerate(model_onnx.graph.initializer): - if w.name in new_inits: - replace_weights.append(from_array(new_inits[w.name], w.name)) - replace_indices.append(i) - updated.append(w.name) - replace_indices.sort(reverse=True) - for w_i in replace_indices: - del model_onnx.graph.initializer[w_i] - model_onnx.graph.initializer.extend(replace_weights) - return updated -
+  skl2onnx.helpers.onnx_helper - sklearn-onnx 1.14.0 documentation

Source code for skl2onnx.helpers.onnx_helper

+# SPDX-License-Identifier: Apache-2.0
+
+from logging import getLogger
+from io import BytesIO
+import numpy as np
+import onnx  # noqa
+from onnx import shape_inference, TensorProto
+from onnx.numpy_helper import from_array, to_array
+from onnx.helper import make_tensor
+from ..proto.onnx_helper_modified import (
+    make_node, make_tensor_value_info, make_graph,
+    make_model, ValueInfoProto
+)
+from ..proto import get_latest_tested_opset_version
+from onnx import onnx_pb as onnx_proto
+from ..common._topology import Variable
+
+
+
[docs]def load_onnx_model(onnx_file_or_bytes): + """ + Loads an *ONNX* file. + + :param onnx_file_or_bytes: *ONNX* file or bytes + :return: *ONNX* model + """ + if isinstance(onnx_file_or_bytes, str): + with open(onnx_file_or_bytes, "rb") as f: + return onnx.load(f) + elif hasattr(onnx_file_or_bytes, 'read'): + return onnx.load(onnx_file_or_bytes) + else: + b = BytesIO(onnx_file_or_bytes) + return onnx.load(b)
+ + +
[docs]def save_onnx_model(model, filename=None): + """ + Saves a model as a file or bytes. + + :param model: *ONNX* model + :param filename: filename or None to return bytes + :return: bytes + """ + content = model.SerializeToString() + if filename is not None: + if hasattr(filename, 'write'): + filename.write(content) + else: + with open(filename, "wb") as f: + f.write(content) + return content
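``save_onnx_model`` and ``load_onnx_model`` round-trip a model through bytes or a file. A small sketch (the ``StandardScaler`` conversion and the file name are illustrative):

::

    import numpy as np
    from sklearn.preprocessing import StandardScaler
    from skl2onnx import to_onnx
    from skl2onnx.helpers.onnx_helper import save_onnx_model, load_onnx_model

    X = np.random.rand(10, 3).astype(np.float32)
    onx = to_onnx(StandardScaler().fit(X), X)

    content = save_onnx_model(onx)            # serialized bytes
    restored = load_onnx_model(content)       # load back from bytes
    save_onnx_model(onx, "scaler.onnx")       # or write to a file
    restored_from_file = load_onnx_model("scaler.onnx")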
+ + +
[docs]def enumerate_model_node_outputs(model, add_node=False): + """ + Enumerates all the nodes of a model. + + :param model: ONNX graph + :param add_node: if False, the function enumerates + all output names from every node, otherwise, it + enumerates tuple (output name, node) + :return: enumerator + """ + if not hasattr(model, "graph"): + raise TypeError("Parameter model is not an ONNX model but " + "{}".format(type(model))) + for node in model.graph.node: + for out in node.output: + yield (out, node) if add_node else out
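``enumerate_model_node_outputs`` is handy to discover the intermediate result names of a converted pipeline, for instance before truncating the graph with ``select_model_inputs_outputs`` below. A sketch (the pipeline and data are illustrative):

::

    import numpy as np
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LogisticRegression
    from skl2onnx import to_onnx
    from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs

    X = np.random.rand(20, 3).astype(np.float32)
    y = (X[:, 0] > 0.5).astype(np.int64)
    pipe = make_pipeline(StandardScaler(), LogisticRegression()).fit(X, y)
    onx = to_onnx(pipe, X)

    # every output name produced by a node of the graph
    for name in enumerate_model_node_outputs(onx):
        print(name)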
+ + +def enumerate_model_initializers(model, add_node=False): + """ + Enumerates all the initializers of a model. + + :param model: ONNX graph + :param add_node: if False, the function enumerates + all output names from every node, otherwise, it + enumerates tuple (output name, node) + :return: enumerator + """ + for node in model.graph.initializer: + yield (node.name, node) if add_node else node.name + + +
[docs]def select_model_inputs_outputs(model, outputs=None, inputs=None): + """ + Takes a model and changes its outputs. + + :param model: *ONNX* model + :param inputs: new inputs + :param outputs: new outputs + :return: modified model + + The function removes unneeded files. + """ + if inputs is not None: + raise NotImplementedError("Parameter inputs cannot be empty.") + if outputs is None: + raise RuntimeError("Parameter outputs cannot be None.") + if not isinstance(outputs, list): + outputs = [outputs] + + mark_var = {} + for out in enumerate_model_node_outputs(model): + mark_var[out] = 0 + for inp in model.graph.input: + mark_var[inp.name] = 0 + for out in outputs: + if out not in mark_var: + raise ValueError("Output '{}' not found in model.".format(out)) + mark_var[out] = 1 + + nodes = model.graph.node[::-1] + mark_op = {} + for node in nodes: + mark_op[node.name] = 0 + + # We mark all the nodes we need to keep. + nb = 1 + while nb > 0: + nb = 0 + for node in nodes: + if mark_op[node.name] == 1: + continue + mod = False + for out in node.output: + if mark_var[out] == 1: + mark_op[node.name] = 1 + mod = True + break + if not mod: + continue + + nb += 1 + for inp in node.input: + if mark_var.get(inp, 0) == 1: + continue + mark_var[inp] = 1 + nb += 1 + + # All nodes verifies mark_op[node.name] == 1 + keep_nodes = [node for node in nodes[::-1] if mark_op[node.name] == 1] + + var_out = [] + for out in outputs: + value_info = ValueInfoProto() + value_info.name = out + var_out.append(value_info) + graph = make_graph(keep_nodes, model.graph.name, model.graph.input, + var_out, model.graph.initializer) + onnx_model = make_model(graph) + onnx_model.ir_version = model.ir_version + onnx_model.producer_name = model.producer_name + onnx_model.producer_version = model.producer_version + onnx_model.domain = model.domain + onnx_model.model_version = model.model_version + onnx_model.doc_string = model.doc_string + if len(model.metadata_props) > 0: + values = {p.key: p.value for p in model.metadata_props} + onnx.helper.set_model_props(onnx_model, values) + + if len(onnx_model.graph.input) != len(model.graph.input): + raise RuntimeError("Input mismatch {} != {}".format( + len(onnx_model.input), len(model.input))) + + # fix opset import + del onnx_model.opset_import[:] + for oimp in model.opset_import: + op_set = onnx_model.opset_import.add() + op_set.domain = oimp.domain + op_set.version = oimp.version + return onnx_model
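Continuing the pipeline sketch above (``onx`` is assumed to be that converted model), ``select_model_inputs_outputs`` keeps only the sub-graph needed to compute the requested outputs, which is how intermediate results of a pipeline can be inspected with a runtime. The picked output name below is arbitrary and purely illustrative:

::

    from skl2onnx.helpers.onnx_helper import (
        enumerate_model_node_outputs, select_model_inputs_outputs,
        save_onnx_model)

    first_output = next(iter(enumerate_model_node_outputs(onx)))
    truncated = select_model_inputs_outputs(onx, outputs=first_output)
    save_onnx_model(truncated, "truncated.onnx")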
+ + +def infer_outputs(op_type, inputs, outputs=None, initializer=None, + target_opset=None, **atts): + """ + Infers outputs type and shapes given an ONNX operator. + """ + logger = getLogger('skl2onnx') + logger.debug( + '[infer_outputs] op_type=%r inputs=%r outputs=%r', + op_type, [x.name for x in inputs], outputs) + if isinstance(op_type, str): + required_outputs = [] + if outputs: + for o in outputs: + if hasattr(o, 'onnx_name'): + required_outputs.append(o.onnx_name) + elif isinstance(o, str): + required_outputs.append(o) + else: + raise TypeError("Unable to require output {}.".format(o)) + node = make_node(op_type, [i.onnx_name for i in inputs], + required_outputs, **atts) + node = [node] + elif hasattr(op_type, 'nodes'): + node = op_type.nodes + else: + raise RuntimeError("Unable to build ONNX nodes from type {}.".format( + type(op_type))) + + input_init = inputs.copy() + if initializer: + input_init.extend(initializer) + onnx_inputs = [] + for input in input_init: + if isinstance(input, Variable): + onnx_type = input.type.to_onnx_type() + tensor_type = onnx_type.tensor_type + shape = [tensor_type.shape.dim[i].dim_value + for i in range(len(tensor_type.shape.dim))] + inp = make_tensor_value_info(input.onnx_name, + tensor_type.elem_type, + tuple(shape)) + onnx_inputs.append(inp) + elif isinstance(input, onnx.TensorProto): + v = make_tensor_value_info( + input.name, input.data_type.real, + list(d for d in input.dims)) + onnx_inputs.append(v) + elif isinstance(input, onnx.AttributeProto): + value_info = ValueInfoProto() + value_info.name = input.name + onnx_type = onnx_proto.TypeProto() + onnx_type.tensor_type.elem_type = input.type + value_info.type.CopyFrom(onnx_type) + onnx_inputs.append(value_info) + else: + onnx_inputs.append(input) + + graph = make_graph(node, 'infer_shapes', + onnx_inputs, []) + original_model = make_model(graph, producer_name='skl2onnx') + domains = {} + for n in node: + domains[n.domain] = max(domains.get(n.domain, 1), + getattr(n, 'op_version', 1)) + for i, (k, v) in enumerate(domains.items()): + if i == 0 and len(original_model.opset_import) == 1: + op_set = original_model.opset_import[0] + else: + op_set = original_model.opset_import.add() + op_set.domain = k + if target_opset: + if isinstance(target_opset, dict): + op_set.version = target_opset.get( + k, get_latest_tested_opset_version()) + else: + op_set.version = target_opset + else: + op_set.version = get_latest_tested_opset_version() + + try: + inferred_model = shape_inference.infer_shapes(original_model) + except RuntimeError as e: + raise RuntimeError( + "Unable to infer shape of node '{}'\n{}".format( + op_type, original_model)) from e + all_shapes = Variable.from_pb(inferred_model.graph.value_info) + used = set() + for node in graph.node: + for name in node.input: + used.add(name) + shapes = [shape for shape in all_shapes if shape.onnx_name not in used] + if len(shapes) == 0: + raise RuntimeError( + f"Shape inference fails.\n*Inputs*\n{onnx_inputs}\n" + f"*all_shapes*\n{all_shapes}'\n" + f"*Model*\n{original_model}'") + logger.debug('[infer_outputs] shapes=%r', shapes) + return shapes + + +def change_onnx_domain(model, ops): + """ + Takes a model and changes its outputs. + + :param model: *ONNX* model + :param ops: dictionary { optype: ('optype', 'new domain') } + :return: modified model + + The function removes unneeded files. 
+ """ + nodes = model.graph.node + for node in nodes: + rep = ops.get(node.op_type, None) + if rep is None: + continue + node.op_type = rep[0] + node.domain = rep[1] + + graph = make_graph(nodes, model.graph.name, model.graph.input, + model.graph.output, model.graph.initializer) + onnx_model = make_model(graph) + onnx_model.ir_version = model.ir_version + onnx_model.producer_name = model.producer_name + onnx_model.producer_version = model.producer_version + onnx_model.domain = model.domain + onnx_model.model_version = model.model_version + onnx_model.doc_string = model.doc_string + if len(model.metadata_props) > 0: + values = {p.key: p.value for p in model.metadata_props} + onnx.helper.set_model_props(onnx_model, values) + + if len(onnx_model.graph.input) != len(model.graph.input): + raise RuntimeError("Input mismatch {} != {}".format( + len(onnx_model.input), len(model.input))) + + # fix opset import + domain_set = set() + has_domain = False + del onnx_model.opset_import[:] + for oimp in model.opset_import: + op_set = onnx_model.opset_import.add() + op_set.domain = oimp.domain + op_set.version = oimp.version + domain_set.add(oimp.domain) + if not has_domain: + has_domain = oimp.domain in domain_set + for v in ops.values(): + if v[1] not in domain_set: + op_set = onnx_model.opset_import.add() + op_set.domain = v[1] + op_set.version = 1 + return onnx_model + + +def add_output_initializer(model_onnx, name, value, suffix='_init'): + """ + Add a constant and link it to one output. + It allows the user to store arrays into the graph + and retrieve them when using it. + The initializer is named `name + suffix`, the output + is named `name`. + + :param model_onnx: ONNX graph + :param name: initializer name (initializer name, output name) + :param value: array to store + :param suffix: name of the initializer + :return: new model + + It is possible to add multiple constant by using list: + ``add_output_initializer(model_onnx, ['name1', 'name2'], [v1, v2])``. + """ + if isinstance(name, str): + name_list = [name] + value_list = [value] + else: + name_list = name + value_list = value + + if len(name_list) != len(value_list): + raise ValueError( + "Mismatched names and values. There are %d names and %d values." + "" % (len(name_list), len(value_list))) + + nodes = list(model_onnx.graph.node) + inits = list(model_onnx.graph.initializer) + outputs = list(model_onnx.graph.output) + + for name, value in zip(name_list, value_list): + name_output = name + name_init = name + suffix + names = set(i.name for i in model_onnx.graph.initializer) + if name_output in names or name_init in names: + raise ValueError( + "Names %r or %r is already taken by an initializer: %r." % ( + name_output, name_init, ", ".join(sorted(names)))) + names = set(i.name for i in model_onnx.graph.output) + if name_output in names or name_init in names: + raise ValueError( + "Names %r or %r is already taken by an output: %r." % ( + name_output, name_init, ", ".join(sorted(names)))) + names = set(i.name for i in model_onnx.graph.input) + if name_output in names or name_init in names: + raise ValueError( + "Names %r or %r is already taken by an output: %r." 
% ( + name_output, name_init, ", ".join(sorted(names)))) + + try: + cst = from_array(value, name=name_init) + except RuntimeError as e: + st = str(value.dtype).lower() + if st.startswith('u') or st.startswith("<u"): + cst_value = np.array([s.encode('utf-8') for s in value]) + cst = make_tensor( + name_init, data_type=TensorProto.STRING, + dims=value.shape, vals=list(cst_value)) + else: + raise e + + inits.append(cst) + + outputs.append(make_tensor_value_info( + name_output, cst.data_type, cst.dims)) + + nodes.append(make_node('Identity', [name_init], [name_output])) + + graph = make_graph( + nodes, model_onnx.graph.name, model_onnx.graph.input, + outputs, inits) + + onnx_model = make_model(graph) + onnx_model.ir_version = model_onnx.ir_version + onnx_model.producer_name = model_onnx.producer_name + onnx_model.producer_version = model_onnx.producer_version + onnx_model.domain = model_onnx.domain + onnx_model.model_version = model_onnx.model_version + onnx_model.doc_string = model_onnx.doc_string + if len(model_onnx.metadata_props) > 0: + values = {p.key: p.value for p in model_onnx.metadata_props} + onnx.helper.set_model_props(onnx_model, values) + + if len(onnx_model.graph.input) != len(model_onnx.graph.input): + raise RuntimeError("Input mismatch {} != {}".format( + len(onnx_model.input), len(model_onnx.input))) + + # fix opset import + del onnx_model.opset_import[:] + for oimp in model_onnx.opset_import: + op_set = onnx_model.opset_import.add() + op_set.domain = oimp.domain + op_set.version = oimp.version + return onnx_model + + +def get_initializers(model_onnx): + """ + Retrieves the list of initializers in a model in a + dictionary `{ name: value }`. + """ + res = {} + for init in model_onnx.graph.initializer: + res[init.name] = to_array(init) + return res + + +def update_onnx_initializers(model_onnx, new_inits): + """ + Updates initializer in a ONNX model. + + :param model_onnx: ONNX model + :param new_inits: new initializers + :return: list of updated initializers + """ + updated = [] + replace_weights = [] + replace_indices = [] + for i, w in enumerate(model_onnx.graph.initializer): + if w.name in new_inits: + replace_weights.append(from_array(new_inits[w.name], w.name)) + replace_indices.append(i) + updated.append(w.name) + replace_indices.sort(reverse=True) + for w_i in replace_indices: + del model_onnx.graph.initializer[w_i] + model_onnx.graph.initializer.extend(replace_weights) + return updated +
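A small sketch combining `get_initializers` and `update_onnx_initializers` to inspect and patch the constants stored in a converted model; the file name is a placeholder and only floating-point initializers are touched:

    import numpy as np
    import onnx

    model = onnx.load("pipeline.onnx")                   # placeholder file name
    inits = get_initializers(model)                      # {name: numpy array}
    new_inits = {name: np.zeros_like(value)
                 for name, value in inits.items()
                 if np.issubdtype(value.dtype, np.floating)}
    updated = update_onnx_initializers(model, new_inits)  # edits model in place
    print(updated)                                        # names that were replaced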
\ No newline at end of file
diff --git a/_modules/skl2onnx/operator_converters/text_vectoriser.html b/_modules/skl2onnx/operator_converters/text_vectoriser.html
index fbf62319d..f23c9a6b6 100644
--- a/_modules/skl2onnx/operator_converters/text_vectoriser.html
+++ b/_modules/skl2onnx/operator_converters/text_vectoriser.html
@@ -1,655 +1,798 @@
- skl2onnx.operator_converters.text_vectoriser — sklearn-onnx 1.11.2 documentation

Source code for skl2onnx.operator_converters.text_vectoriser

-# SPDX-License-Identifier: Apache-2.0
-
-
-import warnings
-from collections import OrderedDict
-import numpy as np
-from ..common._apply_operation import (
-    apply_cast, apply_reshape, apply_identity)
-from ..common._registration import register_converter
-from ..common._topology import Scope, Operator
-from ..common._container import ModelComponentContainer
-from ..common.data_types import guess_proto_type, StringTensorType
-from ..proto import onnx_proto
-from ..algebra.onnx_ops import OnnxStringNormalizer
-
-
-def _intelligent_split(text, op, tokenizer, existing):
-    """
-    Splits text into tokens. *scikit-learn*
-    merges tokens with ``' '.join(tokens)``
-    to name ngrams. ``'a  b'`` could be ``('a ', 'b')``
-    or ``('a', ' b')``.
-    See `ngram sequence
-    <https://github.com/scikit-learn/scikit-learn/blob/master/
-    sklearn/feature_extraction/text.py#L169>`_.
-    """
-    if op.analyzer == 'word':
-        if op.ngram_range[0] == op.ngram_range[1] == 1:
-            spl = [text]
-        elif op.ngram_range[0] == 1 and len(text) >= 2:
-            # Every element is in the vocabulary.
-            # Naive method
-            p1 = len(text) - len(text.lstrip())
-            p2_ = len(text) - len(text.rstrip())
-            if p2_ == 0:
-                p2 = len(text)
-            else:
-                p2 = -p2_
-            spl = text[p1:p2].split()
-            if len(spl) <= 1:
-                spl = [text]
-            else:
-                spl[0] = " " * p1 + spl[0]
-                spl[-1] = spl[-1] + " " * p2_
-            if any(map(lambda g: g not in op.vocabulary_, spl)):
-                # TODO: handle this case with an algorithm
-                # which is able to break a string into
-                # known substrings.
-                raise RuntimeError("Unable to split n-grams '{}' "
-                                   "into tokens existing in the "
-                                   "vocabulary. This happens when "
-                                   "a token contain spaces.".format(text))
-        else:
-            # We reuse the tokenizer hoping that will clear
-            # ambiguities but this might be slow.
-            spl = tokenizer(text)
-    else:
-        spl = list(text)
-
-    spl = tuple(spl)
-    if spl in existing:
-        raise RuntimeError("The converter cannot guess how to "
-                           "split an expression into tokens. "
-                           "This happens when "
-                           "a token contain spaces.")
-    if op.ngram_range[0] == 1 and \
-            (len(op.ngram_range) == 1 or op.ngram_range[1] > 1):
-        # All grams should be existing in the vocabulary.
-        for g in spl:
-            if g not in op.vocabulary_:
-                nos = g.replace(" ", "")
-                couples = [(w, w.replace(" ", "")) for w in op.vocabulary_]
-                possible = ['{}'.format(w[0])
-                            for w in couples if w[1] == nos]
-                raise RuntimeError(
-                    "Unable to split n-grams '{}' due to '{}' "
-                    "into tokens existing in the "
-                    "vocabulary. This happens when "
-                    "a token contain spaces. Ambiguity found is '{}' "
-                    ".".format(text, g, possible))
-    existing.add(spl)
-    return spl
-
-
-
[docs]def convert_sklearn_text_vectorizer(scope: Scope, operator: Operator, - container: ModelComponentContainer): - """ - Converters for class - `TfidfVectorizer <https://scikit-learn.org/stable/modules/generated/ - sklearn.feature_extraction.text.TfidfVectorizer.html>`_. - The current implementation is a work in progress and the ONNX version - does not produce the exact same results. The converter lets the user - change some of its parameters. - - Additional options - ------------------ - - tokenexp: string - The default will change to true in version 1.6.0. - The tokenizer splits into words using this regular - expression or the regular expression specified by - *scikit-learn* is the value is an empty string. - See also note below. - Default value: None - separators: list of separators - These separators are used to split a string into words. - Options *separators* is ignore if options *tokenexp* is not None. - Default value: ``[' ', '[.]', '\\\\?', ',', ';', ':', '\\\\!']``. - - Example (from :ref:`l-example-tfidfvectorizer`): - - :: - - seps = {TfidfVectorizer: {"separators": [' ', '[.]', '\\\\?', ',', ';', - ':', '!', '\\\\(', '\\\\)', - '\\n', '\\\\"', "'", "-", - "\\\\[", "\\\\]", "@"]}} - model_onnx = convert_sklearn(pipeline, "tfidf", - initial_types=[("input", StringTensorType([None, 2]))], - options=seps) - - The default regular expression of the tokenizer is ``(?u)\\\\b\\\\w\\\\w+\\\\b`` - (see `re <https://docs.python.org/3/library/re.html>`_). - This expression may not supported by the library handling the backend. - `onnxruntime <https://github.com/Microsoft/onnxruntime>`_ uses - `re2 <https://github.com/google/re2>`_. You may need to switch - to a custom tokenizer based on - `python wrapper for re2 <https://pypi.org/project/re2/>`_ - or its sources `pyre2 <https://github.com/facebook/pyre2>`_ - (`syntax <https://github.com/google/re2/blob/master/doc/syntax.txt>`_). - If the regular expression is not specified and if - the instance of TfidfVectorizer is using the default - pattern ``(?u)\\\\b\\\\w\\\\w+\\\\b``, it is replaced by - ``[a-zA-Z0-9_]+``. Any other case has to be - manually handled. - - Regular expression ``[^\\\\\\\\n]`` is used to split - a sentance into character (and not works) if ``analyser=='char'``. - The mode ``analyser=='char_wb'`` is not implemented. - - .. versionchanged:: 1.6 - Parameters have been renamed: *sep* into *separators*, - *regex* into *tokenexp*. - ```` - - """ # noqa - op = operator.raw_operator - - if (container.target_opset is not None and - container.target_opset < 9): - raise RuntimeError( - "Converter for '{}' only works for opset >= 9." - "".format(op.__class__.__name__)) - - if op.analyzer == "char_wb": - raise NotImplementedError( - "CountVectorizer cannot be converted, " - "only tokenizer='word' is fully supported. " - "You may raise an issue at " - "https://github.com/onnx/sklearn-onnx/issues.") - if op.analyzer == "char": - warnings.warn( - "The conversion of CountVectorizer may not work. " - "only tokenizer='word' is fully supported. " - "You may raise an issue at " - "https://github.com/onnx/sklearn-onnx/issues.", - UserWarning) - if op.strip_accents is not None: - raise NotImplementedError( - "CountVectorizer cannot be converted, " - "only stip_accents=None is supported. 
" - "You may raise an issue at " - "https://github.com/onnx/sklearn-onnx/issues.") - - options = container.get_options( - op, dict(separators="DEFAULT", - tokenexp=None, - nan=False, - keep_empty_string=False)) - if set(options) != {'separators', 'tokenexp', 'nan', 'keep_empty_string'}: - raise RuntimeError("Unknown option {} for {}".format( - set(options) - {'separators'}, type(op))) - - if op.analyzer == 'word': - default_pattern = '(?u)\\b\\w\\w+\\b' - if options['separators'] == "DEFAULT" and options['tokenexp'] is None: - warnings.warn("Converter for TfidfVectorizer will use " - "scikit-learn regular expression by default " - "in version 1.6.", - UserWarning) - regex = op.token_pattern - if regex == default_pattern: - regex = '[a-zA-Z0-9_]+' - default_separators = None - elif options['tokenexp'] is not None: - if options['tokenexp']: - regex = options['tokenexp'] - else: - regex = op.token_pattern - if regex == default_pattern: - regex = '[a-zA-Z0-9_]+' - default_separators = None - else: - regex = None - default_separators = options['separators'] - else: - if options['separators'] != 'DEFAULT': - raise RuntimeError("Option separators has no effect " - "if analyser != 'word'.") - regex = options['tokenexp'] if options['tokenexp'] else '.' - default_separators = None - - if op.preprocessor is not None: - raise NotImplementedError( - "Custom preprocessor cannot be converted into ONNX. " - "You may raise an issue at " - "https://github.com/onnx/sklearn-onnx/issues.") - if op.tokenizer is not None: - raise NotImplementedError( - "Custom tokenizer cannot be converted into ONNX. " - "You may raise an issue at " - "https://github.com/onnx/sklearn-onnx/issues.") - if op.strip_accents is not None: - raise NotImplementedError( - "Operator StringNormalizer cannot remove accents. 
" - "You may raise an issue at " - "https://github.com/onnx/sklearn-onnx/issues.") - - if hasattr(op, "stop_words_"): - stop_words = op.stop_words_ | ( - set(op.stop_words) if op.stop_words else set()) - else: - stop_words = set() - - if op.lowercase or stop_words: - if len(operator.input_full_names) != 1: - raise RuntimeError("Only one input is allowed, found {}.".format( - operator.input_full_names)) - - # StringNormalizer - op_type = 'StringNormalizer' - attrs = {'name': scope.get_unique_operator_name(op_type)} - normalized = scope.get_unique_variable_name('normalized') - if container.target_opset >= 10: - attrs.update({ - 'case_change_action': 'LOWER', - 'is_case_sensitive': not op.lowercase, - }) - op_version = 10 - domain = '' - else: - attrs.update({ - 'casechangeaction': 'LOWER', - 'is_case_sensitive': not op.lowercase, - }) - op_version = 9 - domain = 'com.microsoft' - opvs = 1 if domain == 'com.microsoft' else op_version - if stop_words: - attrs['stopwords'] = list(sorted(stop_words)) - - if options['keep_empty_string']: - del attrs['name'] - op_norm = OnnxStringNormalizer( - 'text_in', op_version=container.target_opset, - output_names=['text_out'], **attrs) - scan_body = op_norm.to_onnx( - OrderedDict([('text_in', StringTensorType())]), - outputs=[('text_out', StringTensorType())], - target_opset=op_version) - - vector = scope.get_unique_variable_name('vector') - apply_reshape(scope, operator.input_full_names[0], - vector, container, - desired_shape=(-1, 1)) - container.add_node('Scan', vector, normalized, - body=scan_body.graph, num_scan_inputs=1) - else: - flatten = scope.get_unique_variable_name('flattened') - apply_reshape(scope, operator.input_full_names[0], - flatten, container, - desired_shape=(-1, )) - container.add_node(op_type, flatten, - normalized, op_version=opvs, - op_domain=domain, **attrs) - else: - normalized = operator.input_full_names - - # Tokenizer - padvalue = "#" - while padvalue in op.vocabulary_: - padvalue += "#" - - op_type = 'Tokenizer' - attrs = {'name': scope.get_unique_operator_name(op_type)} - attrs.update({ - 'pad_value': padvalue, - 'mark': False, - 'mincharnum': 1, - }) - if regex is None: - attrs['separators'] = default_separators - else: - attrs['tokenexp'] = regex - - tokenized = scope.get_unique_variable_name('tokenized') - container.add_node(op_type, normalized, tokenized, - op_domain='com.microsoft', **attrs) - - # Flatten - # Tokenizer outputs shape {1, C} or {1, 1, C}. - # Second shape is not allowed by TfIdfVectorizer. - # We use Flatten which produces {1, C} in both cases. - flatt_tokenized = scope.get_unique_variable_name('flattened') - container.add_node("Flatten", tokenized, flatt_tokenized, - name=scope.get_unique_operator_name('Flatten')) - tokenized = flatt_tokenized - - # Ngram - TfIdfVectorizer - C = max(op.vocabulary_.values()) + 1 - words = [None for i in range(C)] - weights = [0 for i in range(C)] - for k, v in op.vocabulary_.items(): - words[v] = k - weights[v] = 1. - mode = 'TF' - - # Scikit-learn sorts n-grams by alphabetical order.. - # onnx assumes it is sorted by n. 
- tokenizer = op.build_tokenizer() - split_words = [] - existing = set() - for w in words: - spl = _intelligent_split(w, op, tokenizer, existing) - split_words.append((spl, w)) - - ng_split_words = [(len(a[0]), a[0], i) for i, a in enumerate(split_words)] - ng_split_words.sort() - key_indices = [a[2] for a in ng_split_words] - ngcounts = [0 for i in range(op.ngram_range[0])] - - words = list(ng_split_words[0][1]) - for i in range(1, len(ng_split_words)): - if ng_split_words[i-1][0] != ng_split_words[i][0]: - ngcounts.append(len(words)) - words.extend(ng_split_words[i][1]) - - weights_ = [weights[a[2]] for a in ng_split_words] - weights = list(weights_) - for i, ind in enumerate(key_indices): - weights[ind] = weights_[i] - - # Create the node. - attrs = {'name': scope.get_unique_operator_name("TfIdfVectorizer")} - attrs.update({ - 'min_gram_length': op.ngram_range[0], - 'max_gram_length': op.ngram_range[1], - 'mode': mode, - 'max_skip_count': 0, - 'pool_strings': words, - 'ngram_indexes': key_indices, - 'ngram_counts': ngcounts, - 'weights': list(map(np.float32, weights)), - }) - output = scope.get_unique_variable_name('output') - - proto_dtype = guess_proto_type(operator.inputs[0].type) - if proto_dtype != onnx_proto.TensorProto.DOUBLE: - proto_dtype = onnx_proto.TensorProto.FLOAT - - if proto_dtype == onnx_proto.TensorProto.DOUBLE: - output_tf = scope.get_unique_variable_name('cast_result') - else: - output_tf = output - - if container.target_opset < 9: - op_type = 'Ngram' - container.add_node(op_type, tokenized, output_tf, - op_domain='com.microsoft', **attrs) - else: - op_type = 'TfIdfVectorizer' - container.add_node(op_type, tokenized, output_tf, op_domain='', - op_version=9, **attrs) - - if proto_dtype == onnx_proto.TensorProto.DOUBLE: - apply_cast(scope, output_tf, output, - container, to=proto_dtype) - - if op.binary: - cast_result_name = scope.get_unique_variable_name('cast_result') - output_name = scope.get_unique_variable_name('output_name') - - apply_cast(scope, output, cast_result_name, container, - to=onnx_proto.TensorProto.BOOL) - apply_cast(scope, cast_result_name, output_name, - container, to=onnx_proto.TensorProto.FLOAT) - output = output_name - - options = container.get_options(op, dict(nan=False)) - replace_by_nan = options.get('nan', False) - if replace_by_nan: - # This part replaces all null values by nan. - cst_nan_name = scope.get_unique_variable_name('nan_name') - container.add_initializer(cst_nan_name, proto_dtype, [1], [np.nan]) - cst_zero_name = scope.get_unique_variable_name('zero_name') - container.add_initializer(cst_zero_name, proto_dtype, [1], [0]) - - mask_name = scope.get_unique_variable_name('mask_name') - container.add_node('Equal', [output, cst_zero_name], - mask_name, - name=scope.get_unique_operator_name('Equal')) - - where_name = scope.get_unique_variable_name('where_name') - container.add_node('Where', [mask_name, cst_nan_name, output], - where_name, - name=scope.get_unique_operator_name('Where')) - output = where_name - - apply_identity(scope, output, operator.output_full_names, container)
- - -register_converter('SklearnCountVectorizer', convert_sklearn_text_vectorizer, - options={'tokenexp': None, 'separators': None, - 'nan': [True, False], - 'keep_empty_string': [True, False]}) -
+ skl2onnx.operator_converters.text_vectoriser - sklearn-onnx 1.14.0 documentation

Source code for skl2onnx.operator_converters.text_vectoriser

+# SPDX-License-Identifier: Apache-2.0
+
+
+import warnings
+from collections import OrderedDict, Counter
+import numpy as np
+from ..common._apply_operation import (
+    apply_cast, apply_reshape, apply_identity)
+from ..common._registration import register_converter
+from ..common._topology import Scope, Operator
+from ..common._container import ModelComponentContainer
+from ..common.data_types import guess_proto_type, StringTensorType
+from ..proto import onnx_proto
+from ..algebra.onnx_ops import OnnxStringNormalizer
+
+
+def _intelligent_split(text, op, tokenizer, existing):
+    """
+    Splits text into tokens. *scikit-learn*
+    merges tokens with ``' '.join(tokens)``
+    to name ngrams. ``'a  b'`` could be ``('a ', 'b')``
+    or ``('a', ' b')``.
+    See `ngram sequence
+    <https://github.com/scikit-learn/scikit-learn/blob/master/
+    sklearn/feature_extraction/text.py#L169>`_.
+    """
+    if op.analyzer == 'word':
+        if op.ngram_range[0] == op.ngram_range[1] == 1:
+            spl = [text]
+        elif op.ngram_range[0] == 1 and len(text) >= 2:
+            # Every element is in the vocabulary.
+            # Naive method
+            p1 = len(text) - len(text.lstrip())
+            p2_ = len(text) - len(text.rstrip())
+            if p2_ == 0:
+                p2 = len(text)
+            else:
+                p2 = -p2_
+            spl = text[p1:p2].split()
+            if len(spl) <= 1:
+                spl = [text]
+            else:
+                spl[0] = " " * p1 + spl[0]
+                spl[-1] = spl[-1] + " " * p2_
+            exc = None
+            if len(spl) == 1:
+                pass
+            elif len(spl) == 2:
+                if (spl[0] not in op.vocabulary_ or
+                        spl[1] not in op.vocabulary_):
+                    # This is necessarily a single token.
+                    spl = [text]
+                elif spl[0] in op.vocabulary_ and spl[1] in op.vocabulary_:
+                    # ambiguity
+                    # w1 and w2 can form either a single 2-gram or two separate tokens.
+                    # Usually, ' ' is not part of any token.
+                    pass
+            elif len(spl) == 3:
+                stok = (all([s in op.vocabulary_ for s in spl]), spl)
+                spl12 = (spl[2] in op.vocabulary_ and
+                         (spl[0] + ' ' + spl[1]) in op.vocabulary_,
+                         [spl[0] + ' ' + spl[1], spl[2]])
+                spl23 = (spl[0] in op.vocabulary_ and
+                         (spl[1] + ' ' + spl[2]) in op.vocabulary_,
+                         [spl[0], spl[1] + ' ' + spl[2]])
+                c = Counter(map(lambda t: t[0], [stok, spl12, spl23]))
+                if c.get(True, -1) == 0:
+                    spl = [text]
+                found = [el[1] for el in [stok, spl12, spl23] if el[0]]
+                if len(found) == 1:
+                    spl = found[0]
+                elif len(found) == 0:
+                    spl = [text]
+                elif stok[0]:
+                    # By default, we assume the token is just the sum of
+                    # single words.
+                    pass
+                else:
+                    exc = (
+                        "More than one decomposition in tokens: [" +
+                        ", ".join(map(lambda t: "-".join(t), found)) + "].")
+            elif any(map(lambda g: g in op.vocabulary_, spl)):
+                # TODO: handle this case with an algorithm
+                # which is able to break a string into
+                # known substrings.
+                exc = "Unable to identify tokens in n-grams."
+            if exc:
+                raise RuntimeError(
+                    "Unable to split n-grams '{}' into tokens. "
+                    "{} This happens when a token contains "
+                    "spaces. Token '{}' may be a single token or the n-gram {}."
+                    "".format(text, exc, text, spl))
+        else:
+            # We reuse the tokenizer hoping that will clear
+            # ambiguities but this might be slow.
+            spl = tokenizer(text)
+    else:
+        spl = list(text)
+
+    spl = tuple(spl)
+    if spl in existing:
+        raise RuntimeError(
+            f"The converter cannot guess how to split expression "
+            f"{text!r} into tokens. This case happens when tokens have "
+            f"spaces.")
+    if (op.ngram_range[0] == 1 and
+            (len(op.ngram_range) == 1 or op.ngram_range[1] > 1)):
+        # All grams should be existing in the vocabulary.
+        for g in spl:
+            if g not in op.vocabulary_:
+                raise RuntimeError(
+                    "Unable to split n-grams '{}' into tokens {} "
+                    "existing in the vocabulary. Token '{}' does not "
+                    "exist in the vocabulary.".format(text, spl, g))
+    existing.add(spl)
+    return spl
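To see why this splitting logic is needed, here is a small standalone illustration (not part of the converter) of how scikit-learn stores n-grams as space-joined strings in `vocabulary_`:

    from sklearn.feature_extraction.text import CountVectorizer

    corpus = ["machine learning with onnx", "machine learning models"]
    vect = CountVectorizer(ngram_range=(1, 2)).fit(corpus)
    # Entries such as 'machine learning' are plain strings: the converter must
    # guess whether they stand for one token or the 2-gram ('machine', 'learning').
    print(sorted(vect.vocabulary_))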
+
+
+
[docs]def convert_sklearn_text_vectorizer(scope: Scope, operator: Operator, + container: ModelComponentContainer): + """ + Converters for class + `TfidfVectorizer <https://scikit-learn.org/stable/modules/generated/ + sklearn.feature_extraction.text.TfidfVectorizer.html>`_. + The current implementation is a work in progress and the ONNX version + does not produce the exact same results. The converter lets the user + change some of its parameters. + + Additional options + ------------------ + + tokenexp: string + The default will change to true in version 1.6.0. + The tokenizer splits into words using this regular + expression or the regular expression specified by + *scikit-learn* is the value is an empty string. + See also note below. + Default value: None + separators: list of separators + These separators are used to split a string into words. + Options *separators* is ignore if options *tokenexp* is not None. + Default value: ``[' ', '[.]', '\\\\?', ',', ';', ':', '\\\\!']``. + + Example (from :ref:`l-example-tfidfvectorizer`): + + :: + + seps = {TfidfVectorizer: {"separators": [' ', '[.]', '\\\\?', ',', ';', + ':', '!', '\\\\(', '\\\\)', + '\\n', '\\\\"', "'", "-", + "\\\\[", "\\\\]", "@"]}} + model_onnx = convert_sklearn(pipeline, "tfidf", + initial_types=[("input", StringTensorType([None, 2]))], + options=seps) + + The default regular expression of the tokenizer is ``(?u)\\\\b\\\\w\\\\w+\\\\b`` + (see `re <https://docs.python.org/3/library/re.html>`_). + This expression may not supported by the library handling the backend. + `onnxruntime <https://github.com/Microsoft/onnxruntime>`_ uses + `re2 <https://github.com/google/re2>`_. You may need to switch + to a custom tokenizer based on + `python wrapper for re2 <https://pypi.org/project/re2/>`_ + or its sources `pyre2 <https://github.com/facebook/pyre2>`_ + (`syntax <https://github.com/google/re2/blob/master/doc/syntax.txt>`_). + If the regular expression is not specified and if + the instance of TfidfVectorizer is using the default + pattern ``(?u)\\\\b\\\\w\\\\w+\\\\b``, it is replaced by + ``[a-zA-Z0-9_]+``. Any other case has to be + manually handled. + + Regular expression ``[^\\\\\\\\n]`` is used to split + a sentance into character (and not works) if ``analyser=='char'``. + The mode ``analyser=='char_wb'`` is not implemented. + + .. versionchanged:: 1.6 + Parameters have been renamed: *sep* into *separators*, + *regex* into *tokenexp*. + ```` + + """ # noqa + op = operator.raw_operator + + if (container.target_opset is not None and + container.target_opset < 9): + raise RuntimeError( + "Converter for '{}' only works for opset >= 9." + "".format(op.__class__.__name__)) + + if op.analyzer == "char_wb": + raise NotImplementedError( + "CountVectorizer cannot be converted, " + "only tokenizer='word' is fully supported. " + "You may raise an issue at " + "https://github.com/onnx/sklearn-onnx/issues.") + if op.analyzer == "char": + warnings.warn( + "The conversion of CountVectorizer may not work. " + "only tokenizer='word' is fully supported. " + "You may raise an issue at " + "https://github.com/onnx/sklearn-onnx/issues.", + UserWarning) + if op.strip_accents is not None: + raise NotImplementedError( + "CountVectorizer cannot be converted, " + "only strip_accents=None is supported. 
" + "You may raise an issue at " + "https://github.com/onnx/sklearn-onnx/issues.") + + options = container.get_options( + op, dict(separators="DEFAULT", + tokenexp=None, + nan=False, + keep_empty_string=False)) + if set(options) != {'separators', 'tokenexp', 'nan', 'keep_empty_string'}: + raise RuntimeError("Unknown option {} for {}".format( + set(options) - {'separators'}, type(op))) + + if op.analyzer == 'word': + default_pattern = '(?u)\\b\\w\\w+\\b' + if options['separators'] == "DEFAULT" and options['tokenexp'] is None: + regex = op.token_pattern + if regex == default_pattern: + regex = '[a-zA-Z0-9_]+' + default_separators = None + elif options['tokenexp'] is not None: + if options['tokenexp']: + regex = options['tokenexp'] + else: + regex = op.token_pattern + if regex == default_pattern: + regex = '[a-zA-Z0-9_]+' + default_separators = None + else: + regex = None + default_separators = options['separators'] + else: + if options['separators'] != 'DEFAULT': + raise RuntimeError("Option separators has no effect " + "if analyser != 'word'.") + regex = options['tokenexp'] if options['tokenexp'] else '.' + default_separators = None + + if op.preprocessor is not None: + raise NotImplementedError( + "Custom preprocessor cannot be converted into ONNX. " + "You may raise an issue at " + "https://github.com/onnx/sklearn-onnx/issues.") + if op.tokenizer is not None: + raise NotImplementedError( + "Custom tokenizer cannot be converted into ONNX. " + "You may raise an issue at " + "https://github.com/onnx/sklearn-onnx/issues.") + if op.strip_accents is not None: + raise NotImplementedError( + "Operator StringNormalizer cannot remove accents. " + "You may raise an issue at " + "https://github.com/onnx/sklearn-onnx/issues.") + + if hasattr(op, "stop_words_"): + stop_words = op.stop_words_ | ( + set(op.stop_words) if op.stop_words else set()) + else: + stop_words = set() + for w in stop_words: + if not isinstance(w, str): + raise TypeError( + f"One stop word is not a string {w!r} " + f"in stop_words={stop_words}.") + + if op.lowercase or stop_words: + if len(operator.input_full_names) != 1: + raise RuntimeError("Only one input is allowed, found {}.".format( + operator.input_full_names)) + + # StringNormalizer + op_type = 'StringNormalizer' + attrs = {'name': scope.get_unique_operator_name(op_type)} + normalized = scope.get_unique_variable_name('normalized') + if container.target_opset >= 10: + attrs.update({ + 'case_change_action': 'LOWER', + 'is_case_sensitive': not op.lowercase, + }) + op_version = 10 + domain = '' + else: + attrs.update({ + 'casechangeaction': 'LOWER', + 'is_case_sensitive': not op.lowercase, + }) + op_version = 9 + domain = 'com.microsoft' + opvs = 1 if domain == 'com.microsoft' else op_version + if stop_words: + attrs['stopwords'] = list(sorted(stop_words)) + + if options['keep_empty_string']: + del attrs['name'] + op_norm = OnnxStringNormalizer( + 'text_in', op_version=container.target_opset, + output_names=['text_out'], **attrs) + scan_body = op_norm.to_onnx( + OrderedDict([('text_in', StringTensorType())]), + outputs=[('text_out', StringTensorType())], + target_opset=op_version) + + vector = scope.get_unique_variable_name('vector') + apply_reshape(scope, operator.input_full_names[0], + vector, container, + desired_shape=(-1, 1)) + container.add_node('Scan', vector, normalized, + body=scan_body.graph, num_scan_inputs=1) + else: + flatten = scope.get_unique_variable_name('flattened') + apply_reshape(scope, operator.input_full_names[0], + flatten, container, + desired_shape=(-1, 
)) + container.add_node(op_type, flatten, + normalized, op_version=opvs, + op_domain=domain, **attrs) + else: + normalized = operator.input_full_names + + # Tokenizer + padvalue = "#" + while padvalue in op.vocabulary_: + padvalue += "#" + + op_type = 'Tokenizer' + attrs = {'name': scope.get_unique_operator_name(op_type)} + attrs.update({ + 'pad_value': padvalue, + 'mark': False, + 'mincharnum': 1, + }) + if regex is None: + attrs['separators'] = default_separators + else: + attrs['tokenexp'] = regex + + tokenized = scope.get_unique_variable_name('tokenized') + container.add_node(op_type, normalized, tokenized, + op_domain='com.microsoft', **attrs) + + # Flatten + # Tokenizer outputs shape {1, C} or {1, 1, C}. + # Second shape is not allowed by TfIdfVectorizer. + # We use Flatten which produces {1, C} in both cases. + flatt_tokenized = scope.get_unique_variable_name('flattened') + container.add_node("Flatten", tokenized, flatt_tokenized, + name=scope.get_unique_operator_name('Flatten')) + tokenized = flatt_tokenized + + # Ngram - TfIdfVectorizer + C = max(op.vocabulary_.values()) + 1 + words = [None for i in range(C)] + weights = [0 for i in range(C)] + for k, v in op.vocabulary_.items(): + words[v] = k + weights[v] = 1. + mode = 'TF' + + # Scikit-learn sorts n-grams by alphabetical order.. + # onnx assumes it is sorted by n. + tokenizer = op.build_tokenizer() + split_words = [] + existing = set() + errors = [] + for w in words: + if isinstance(w, tuple): + # TraceableCountVectorizer, TraceableTfIdfVectorizer + spl = list(w) + w = ' '.join(w) + else: + # CountVectorizer, TfIdfVectorizer + try: + spl = _intelligent_split(w, op, tokenizer, existing) + except RuntimeError as e: + errors.append(e) + continue + split_words.append((spl, w)) + if len(errors) > 0: + err = "\n".join(map(str, errors)) + raise RuntimeError( + f"There were ambiguities between n-grams and tokens. " + f"{len(errors)} errors occurred. You can fix it by using " + f"class Traceable{op.__class__.__name__}.\n" + f"You can learn more at https://github.com/scikit-learn/" + f"scikit-learn/issues/13733.\n{err}") + + ng_split_words = sorted([(len(a[0]), a[0], i) + for i, a in enumerate(split_words)]) + key_indices = [a[2] for a in ng_split_words] + ngcounts = [0 for i in range(op.ngram_range[0])] + + words = list(ng_split_words[0][1]) + for i in range(1, len(ng_split_words)): + if ng_split_words[i - 1][0] != ng_split_words[i][0]: + ngcounts.append(len(words)) + words.extend(ng_split_words[i][1]) + + weights_ = [weights[a[2]] for a in ng_split_words] + weights = list(weights_) + for i, ind in enumerate(key_indices): + weights[ind] = weights_[i] + + # Create the node. 
+ attrs = {'name': scope.get_unique_operator_name("TfIdfVectorizer")} + attrs.update({ + 'min_gram_length': op.ngram_range[0], + 'max_gram_length': op.ngram_range[1], + 'mode': mode, + 'max_skip_count': 0, + 'pool_strings': words, + 'ngram_indexes': key_indices, + 'ngram_counts': ngcounts, + 'weights': list(map(np.float32, weights)), + }) + output = scope.get_unique_variable_name('output') + + proto_dtype = guess_proto_type(operator.inputs[0].type) + if proto_dtype != onnx_proto.TensorProto.DOUBLE: + proto_dtype = onnx_proto.TensorProto.FLOAT + + if proto_dtype == onnx_proto.TensorProto.DOUBLE: + output_tf = scope.get_unique_variable_name('cast_result') + else: + output_tf = output + + if container.target_opset < 9: + op_type = 'Ngram' + container.add_node(op_type, tokenized, output_tf, + op_domain='com.microsoft', **attrs) + else: + op_type = 'TfIdfVectorizer' + container.add_node(op_type, tokenized, output_tf, op_domain='', + op_version=9, **attrs) + + if proto_dtype == onnx_proto.TensorProto.DOUBLE: + apply_cast(scope, output_tf, output, + container, to=proto_dtype) + + if op.binary: + cast_result_name = scope.get_unique_variable_name('cast_result') + output_name = scope.get_unique_variable_name('output_name') + + apply_cast(scope, output, cast_result_name, container, + to=onnx_proto.TensorProto.BOOL) + apply_cast(scope, cast_result_name, output_name, + container, to=onnx_proto.TensorProto.FLOAT) + output = output_name + + options = container.get_options(op, dict(nan=False)) + replace_by_nan = options.get('nan', False) + if replace_by_nan: + # This part replaces all null values by nan. + cst_nan_name = scope.get_unique_variable_name('nan_name') + container.add_initializer(cst_nan_name, proto_dtype, [1], [np.nan]) + cst_zero_name = scope.get_unique_variable_name('zero_name') + container.add_initializer(cst_zero_name, proto_dtype, [1], [0]) + + mask_name = scope.get_unique_variable_name('mask_name') + container.add_node('Equal', [output, cst_zero_name], + mask_name, + name=scope.get_unique_operator_name('Equal')) + + where_name = scope.get_unique_variable_name('where_name') + container.add_node('Where', [mask_name, cst_nan_name, output], + where_name, + name=scope.get_unique_operator_name('Where')) + output = where_name + + apply_identity(scope, output, operator.output_full_names, container)
+ + +register_converter('SklearnCountVectorizer', convert_sklearn_text_vectorizer, + options={'tokenexp': None, 'separators': None, + 'nan': [True, False], + 'keep_empty_string': [True, False]}) +
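A hedged sketch of how the options registered above can be passed at conversion time; the corpus, shapes and option values are illustrative, and the ONNX results may differ from scikit-learn as noted in the docstring:

    from sklearn.feature_extraction.text import CountVectorizer
    from skl2onnx import convert_sklearn
    from skl2onnx.common.data_types import StringTensorType

    vect = CountVectorizer().fit(["machine learning with onnx"])
    model_onnx = convert_sklearn(
        vect, "count_vectorizer",
        initial_types=[("input", StringTensorType([None, 1]))],
        options={CountVectorizer: {"tokenexp": "[a-zA-Z0-9_]+"}})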
\ No newline at end of file
diff --git a/_modules/skl2onnx/proto.html b/_modules/skl2onnx/proto.html
index 7dc078b4e..881124f94 100644
--- a/_modules/skl2onnx/proto.html
+++ b/_modules/skl2onnx/proto.html
@@ -1,296 +1,384 @@
- skl2onnx.proto — sklearn-onnx 1.11.2 documentation

Source code for skl2onnx.proto

-# SPDX-License-Identifier: Apache-2.0
-
-
-# Rather than using ONNX protobuf definition throughout our codebase,
-# we import ONNX protobuf definition here so that we can conduct quick
-# fixes by overwriting ONNX functions without changing any lines
-# elsewhere.
-from onnx import onnx_pb as onnx_proto  # noqa
-from onnx import defs  # noqa
-
-# Overwrite the make_tensor defined in onnx.helper because of a bug
-# (string tensor get assigned twice)
-from onnx import mapping
-from onnx.onnx_pb import TensorProto, ValueInfoProto  # noqa
-try:
-    from onnx.onnx_pb import SparseTensorProto  # noqa
-except ImportError:
-    # onnx is too old.
-    pass
-from onnx.helper import split_complex_to_pairs
-
-
-def make_tensor_fixed(name, data_type, dims, vals, raw=False):
-    '''
-    Make a TensorProto with specified arguments.  If raw is False, this
-    function will choose the corresponding proto field to store the
-    values based on data_type. If raw is True, use "raw_data" proto
-    field to store the values, and values should be of type bytes in
-    this case.
-    '''
-    tensor = TensorProto()
-    tensor.data_type = data_type
-    tensor.name = name
-
-    if (data_type == TensorProto.COMPLEX64 or
-            data_type == TensorProto.COMPLEX128):
-        vals = split_complex_to_pairs(vals)
-    if raw:
-        tensor.raw_data = vals
-    else:
-        field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[
-            mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[data_type]]
-        getattr(tensor, field).extend(vals)
-
-    tensor.dims.extend(dims)
-    return tensor
-
-
-def get_opset_number_from_onnx():
-    """
-    Returns the latest opset version supported
-    by the *onnx* package.
-    """
-    return defs.onnx_opset_version()
-
-
-
[docs]def get_latest_tested_opset_version(): - """ - This module relies on *onnxruntime* to test every - converter. The function returns the most recent - target opset tested with *onnxruntime* or the opset - version specified by *onnx* package if this one is lower - (return by `onnx.defs.onnx_opset_version()`). - """ - from .. import __max_supported_opset__ - return min(__max_supported_opset__, get_opset_number_from_onnx())
+ skl2onnx.proto - sklearn-onnx 1.14.0 documentation

Source code for skl2onnx.proto

+# SPDX-License-Identifier: Apache-2.0
+
+
+# Rather than using ONNX protobuf definition throughout our codebase,
+# we import ONNX protobuf definition here so that we can conduct quick
+# fixes by overwriting ONNX functions without changing any lines
+# elsewhere.
+from onnx import onnx_pb as onnx_proto  # noqa
+from onnx import defs  # noqa
+
+# Overwrite the make_tensor defined in onnx.helper because of a bug
+# (string tensors get assigned twice)
+from onnx import mapping
+from onnx.onnx_pb import TensorProto, ValueInfoProto  # noqa
+try:
+    from onnx.onnx_pb import SparseTensorProto  # noqa
+except ImportError:
+    # onnx is too old.
+    pass
+from onnx.helper import split_complex_to_pairs
+
+
+def make_tensor_fixed(name, data_type, dims, vals, raw=False):
+    '''
+    Make a TensorProto with specified arguments.  If raw is False, this
+    function will choose the corresponding proto field to store the
+    values based on data_type. If raw is True, use "raw_data" proto
+    field to store the values, and values should be of type bytes in
+    this case.
+    '''
+    tensor = TensorProto()
+    tensor.data_type = data_type
+    tensor.name = name
+
+    if (data_type == TensorProto.COMPLEX64 or
+            data_type == TensorProto.COMPLEX128):
+        vals = split_complex_to_pairs(vals)
+    if raw:
+        tensor.raw_data = vals
+    else:
+        field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[
+            mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[data_type]]
+        getattr(tensor, field).extend(vals)
+
+    tensor.dims.extend(dims)
+    return tensor
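A minimal sketch of the patched helper above, building a small string tensor (the case affected by the bug mentioned in the comment); the tensor name and values are illustrative:

    from onnx.onnx_pb import TensorProto

    values = [s.encode("utf-8") for s in ["spam", "ham"]]
    tensor = make_tensor_fixed("labels", TensorProto.STRING, dims=[2], vals=values)
    print(tensor.string_data)   # [b'spam', b'ham'], assigned exactly once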
+
+
+def get_opset_number_from_onnx():
+    """
+    Returns the latest opset version supported
+    by the *onnx* package.
+    """
+    return defs.onnx_opset_version()
+
+
+
[docs]def get_latest_tested_opset_version(): + """ + This module relies on *onnxruntime* to test every + converter. The function returns the most recent + target opset tested with *onnxruntime* or the opset + version specified by the *onnx* package if this one is lower + (returned by `onnx.defs.onnx_opset_version()`). + """ + from .. import __max_supported_opset__ + return min(__max_supported_opset__, get_opset_number_from_onnx())
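A short sketch showing how the two helpers above interact when picking a target opset:

    latest = get_opset_number_from_onnx()        # newest opset defined by the installed onnx package
    tested = get_latest_tested_opset_version()   # capped by skl2onnx.__max_supported_opset__
    assert tested <= latest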
+ + + + \ No newline at end of file diff --git a/_sources/api_summary.rst.txt b/_sources/api_summary.rst.txt index 660603595..16041defc 100644 --- a/_sources/api_summary.rst.txt +++ b/_sources/api_summary.rst.txt @@ -8,10 +8,6 @@ API Summary Summary of public functions and classes exposed in *scikit-onnx*. -.. contents:: - :local: - - Version ======= diff --git a/_sources/auto_examples/index.rst.txt b/_sources/auto_examples/index.rst.txt index 16500341d..f22af64fc 100644 --- a/_sources/auto_examples/index.rst.txt +++ b/_sources/auto_examples/index.rst.txt @@ -1,9 +1,5 @@ :orphan: - - -.. _sphx_glr_auto_examples: - Gallery of examples =================== @@ -12,551 +8,482 @@ Gallery of examples +.. raw:: html + +
+ + .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_metadata_thumb.png - :alt: Metadata + .. image:: /auto_examples/images/thumb/sphx_glr_plot_metadata_thumb.png + :alt: Metadata - :ref:`sphx_glr_auto_examples_plot_metadata.py` + :ref:`sphx_glr_auto_examples_plot_metadata.py` .. raw:: html +
Metadata
-.. toctree:: - :hidden: - - /auto_examples/plot_metadata - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_pipeline_thumb.png - :alt: Draw a pipeline + .. image:: /auto_examples/images/thumb/sphx_glr_plot_pipeline_thumb.png + :alt: Draw a pipeline - :ref:`sphx_glr_auto_examples_plot_pipeline.py` + :ref:`sphx_glr_auto_examples_plot_pipeline.py` .. raw:: html +
Draw a pipeline
-.. toctree:: - :hidden: - - /auto_examples/plot_pipeline - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_backend_thumb.png - :alt: ONNX Runtime Backend for ONNX + .. image:: /auto_examples/images/thumb/sphx_glr_plot_backend_thumb.png + :alt: ONNX Runtime Backend for ONNX - :ref:`sphx_glr_auto_examples_plot_backend.py` + :ref:`sphx_glr_auto_examples_plot_backend.py` .. raw:: html +
ONNX Runtime Backend for ONNX
-.. toctree:: - :hidden: - - /auto_examples/plot_backend - .. raw:: html -
+
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_convert_decision_function_thumb.png - :alt: Probabilities or raw scores + .. image:: /auto_examples/images/thumb/sphx_glr_plot_logging_thumb.png + :alt: Logging, verbose - :ref:`sphx_glr_auto_examples_plot_convert_decision_function.py` + :ref:`sphx_glr_auto_examples_plot_logging.py` .. raw:: html +
Logging, verbose
-.. toctree:: - :hidden: - - /auto_examples/plot_convert_decision_function - .. raw:: html -
+
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_logging_thumb.png - :alt: Logging, verbose + .. image:: /auto_examples/images/thumb/sphx_glr_plot_convert_decision_function_thumb.png + :alt: Probabilities or raw scores - :ref:`sphx_glr_auto_examples_plot_logging.py` + :ref:`sphx_glr_auto_examples_plot_convert_decision_function.py` .. raw:: html +
Probabilities or raw scores
-.. toctree:: - :hidden: - - /auto_examples/plot_logging - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_convert_model_thumb.png - :alt: Train, convert and predict a model + .. image:: /auto_examples/images/thumb/sphx_glr_plot_convert_model_thumb.png + :alt: Train, convert and predict a model - :ref:`sphx_glr_auto_examples_plot_convert_model.py` + :ref:`sphx_glr_auto_examples_plot_convert_model.py` .. raw:: html +
Train, convert and predict a model
-.. toctree:: - :hidden: - - /auto_examples/plot_convert_model - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_investigate_pipeline_thumb.png - :alt: Investigate a pipeline + .. image:: /auto_examples/images/thumb/sphx_glr_plot_investigate_pipeline_thumb.png + :alt: Investigate a pipeline - :ref:`sphx_glr_auto_examples_plot_investigate_pipeline.py` + :ref:`sphx_glr_auto_examples_plot_investigate_pipeline.py` .. raw:: html +
Investigate a pipeline
-.. toctree:: - :hidden: - - /auto_examples/plot_investigate_pipeline - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_benchmark_cdist_thumb.png - :alt: Compare CDist with scipy + .. image:: /auto_examples/images/thumb/sphx_glr_plot_benchmark_cdist_thumb.png + :alt: Compare CDist with scipy - :ref:`sphx_glr_auto_examples_plot_benchmark_cdist.py` + :ref:`sphx_glr_auto_examples_plot_benchmark_cdist.py` .. raw:: html +
Compare CDist with scipy
-.. toctree:: - :hidden: - - /auto_examples/plot_benchmark_cdist - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_pipeline_lightgbm_thumb.png - :alt: Convert a pipeline with a LightGbm model + .. image:: /auto_examples/images/thumb/sphx_glr_plot_pipeline_lightgbm_thumb.png + :alt: Convert a pipeline with a LightGbm model - :ref:`sphx_glr_auto_examples_plot_pipeline_lightgbm.py` + :ref:`sphx_glr_auto_examples_plot_pipeline_lightgbm.py` .. raw:: html +
Convert a pipeline with a LightGbm model
-.. toctree:: - :hidden: - - /auto_examples/plot_pipeline_lightgbm - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_convert_zipmap_thumb.png - :alt: Probabilities as a vector or as a ZipMap + .. image:: /auto_examples/images/thumb/sphx_glr_plot_convert_zipmap_thumb.png + :alt: Probabilities as a vector or as a ZipMap - :ref:`sphx_glr_auto_examples_plot_convert_zipmap.py` + :ref:`sphx_glr_auto_examples_plot_convert_zipmap.py` .. raw:: html +
Probabilities as a vector or as a ZipMap
-.. toctree:: - :hidden: - - /auto_examples/plot_convert_zipmap - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_black_op_thumb.png - :alt: Convert a model with a reduced list of operators + .. image:: /auto_examples/images/thumb/sphx_glr_plot_black_op_thumb.png + :alt: Convert a model with a reduced list of operators - :ref:`sphx_glr_auto_examples_plot_black_op.py` + :ref:`sphx_glr_auto_examples_plot_black_op.py` .. raw:: html +
Convert a model with a reduced list of operators
-.. toctree:: - :hidden: - - /auto_examples/plot_black_op - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_benchmark_pipeline_thumb.png - :alt: Benchmark a pipeline + .. image:: /auto_examples/images/thumb/sphx_glr_plot_benchmark_pipeline_thumb.png + :alt: Benchmark a pipeline - :ref:`sphx_glr_auto_examples_plot_benchmark_pipeline.py` + :ref:`sphx_glr_auto_examples_plot_benchmark_pipeline.py` .. raw:: html +
Benchmark a pipeline
-.. toctree:: - :hidden: - - /auto_examples/plot_benchmark_pipeline - .. raw:: html -
+
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_cast_transformer_thumb.png - :alt: Discrepencies with StandardScaler + .. image:: /auto_examples/images/thumb/sphx_glr_plot_pipeline_xgboost_thumb.png + :alt: Convert a pipeline with a XGBoost model - :ref:`sphx_glr_auto_examples_plot_cast_transformer.py` + :ref:`sphx_glr_auto_examples_plot_pipeline_xgboost.py` .. raw:: html +
Convert a pipeline with a XGBoost model
-.. toctree:: - :hidden: - - /auto_examples/plot_cast_transformer - .. raw:: html -
+
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_pipeline_xgboost_thumb.png - :alt: Convert a pipeline with a XGBoost model + .. image:: /auto_examples/images/thumb/sphx_glr_plot_nmf_thumb.png + :alt: Custom Operator for NMF Decomposition - :ref:`sphx_glr_auto_examples_plot_pipeline_xgboost.py` + :ref:`sphx_glr_auto_examples_plot_nmf.py` .. raw:: html +
Custom Operator for NMF Decomposition
-.. toctree:: - :hidden: - - /auto_examples/plot_pipeline_xgboost - .. raw:: html -
+
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_nmf_thumb.png - :alt: Custom Operator for NMF Decomposition + .. image:: /auto_examples/images/thumb/sphx_glr_plot_cast_transformer_thumb.png + :alt: Discrepencies with StandardScaler - :ref:`sphx_glr_auto_examples_plot_nmf.py` + :ref:`sphx_glr_auto_examples_plot_cast_transformer.py` .. raw:: html +
Discrepencies with StandardScaler
-.. toctree:: - :hidden: - - /auto_examples/plot_nmf - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_gpr_thumb.png - :alt: Discrepencies with GaussianProcessorRegressor: use of double + .. image:: /auto_examples/images/thumb/sphx_glr_plot_gpr_thumb.png + :alt: Discrepencies with GaussianProcessorRegressor: use of double - :ref:`sphx_glr_auto_examples_plot_gpr.py` + :ref:`sphx_glr_auto_examples_plot_gpr.py` .. raw:: html +
Discrepencies with GaussianProcessorRegressor: use of double
-.. toctree:: - :hidden: - - /auto_examples/plot_gpr - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_errors_onnxruntime_thumb.png - :alt: Errors with onnxruntime + .. image:: /auto_examples/images/thumb/sphx_glr_plot_errors_onnxruntime_thumb.png + :alt: Errors with onnxruntime - :ref:`sphx_glr_auto_examples_plot_errors_onnxruntime.py` + :ref:`sphx_glr_auto_examples_plot_errors_onnxruntime.py` .. raw:: html +
Errors with onnxruntime
-.. toctree:: - :hidden: - - /auto_examples/plot_errors_onnxruntime - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_onnx_operators_thumb.png - :alt: Play with ONNX operators + .. image:: /auto_examples/images/thumb/sphx_glr_plot_onnx_operators_thumb.png + :alt: Play with ONNX operators - :ref:`sphx_glr_auto_examples_plot_onnx_operators.py` + :ref:`sphx_glr_auto_examples_plot_onnx_operators.py` .. raw:: html +
Play with ONNX operators
-.. toctree:: - :hidden: - - /auto_examples/plot_onnx_operators - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_convert_syntax_thumb.png - :alt: Different ways to convert a model + .. image:: /auto_examples/images/thumb/sphx_glr_plot_convert_syntax_thumb.png + :alt: Different ways to convert a model - :ref:`sphx_glr_auto_examples_plot_convert_syntax.py` + :ref:`sphx_glr_auto_examples_plot_convert_syntax.py` .. raw:: html +
Different ways to convert a model
-.. toctree:: - :hidden: - - /auto_examples/plot_convert_syntax - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_tfidfvectorizer_thumb.png - :alt: TfIdfVectorizer with ONNX + .. image:: /auto_examples/images/thumb/sphx_glr_plot_tfidfvectorizer_thumb.png + :alt: TfIdfVectorizer with ONNX - :ref:`sphx_glr_auto_examples_plot_tfidfvectorizer.py` + :ref:`sphx_glr_auto_examples_plot_tfidfvectorizer.py` .. raw:: html +
TfIdfVectorizer with ONNX
-.. toctree:: - :hidden: - - /auto_examples/plot_tfidfvectorizer - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_complex_pipeline_thumb.png - :alt: Convert a pipeline with ColumnTransformer + .. image:: /auto_examples/images/thumb/sphx_glr_plot_complex_pipeline_thumb.png + :alt: Convert a pipeline with ColumnTransformer - :ref:`sphx_glr_auto_examples_plot_complex_pipeline.py` + :ref:`sphx_glr_auto_examples_plot_complex_pipeline.py` .. raw:: html +
Convert a pipeline with ColumnTransformer
-.. toctree:: - :hidden: - - /auto_examples/plot_complex_pipeline - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_intermediate_outputs_thumb.png - :alt: Walk through intermediate outputs + .. image:: /auto_examples/images/thumb/sphx_glr_plot_intermediate_outputs_thumb.png + :alt: Walk through intermediate outputs - :ref:`sphx_glr_auto_examples_plot_intermediate_outputs.py` + :ref:`sphx_glr_auto_examples_plot_intermediate_outputs.py` .. raw:: html +
Walk through intermediate outputs
-.. toctree:: - :hidden: - - /auto_examples/plot_intermediate_outputs - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_custom_parser_alternative_thumb.png - :alt: When a custom model is neither a classifier nor a regressor (alternative) + .. image:: /auto_examples/images/thumb/sphx_glr_plot_custom_parser_alternative_thumb.png + :alt: When a custom model is neither a classifier nor a regressor (alternative) - :ref:`sphx_glr_auto_examples_plot_custom_parser_alternative.py` + :ref:`sphx_glr_auto_examples_plot_custom_parser_alternative.py` .. raw:: html +
When a custom model is neither a classifier nor a regressor (alternative)
-.. toctree:: - :hidden: - - /auto_examples/plot_custom_parser_alternative - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_custom_parser_thumb.png - :alt: When a custom model is neither a classifier nor a regressor + .. image:: /auto_examples/images/thumb/sphx_glr_plot_custom_parser_thumb.png + :alt: When a custom model is neither a classifier nor a regressor - :ref:`sphx_glr_auto_examples_plot_custom_parser.py` + :ref:`sphx_glr_auto_examples_plot_custom_parser.py` .. raw:: html +
When a custom model is neither a classifier nor a regressor
-.. toctree:: - :hidden: - - /auto_examples/plot_custom_parser - .. raw:: html
.. only:: html - .. figure:: /auto_examples/images/thumb/sphx_glr_plot_custom_model_thumb.png - :alt: Write your own converter for your own model + .. image:: /auto_examples/images/thumb/sphx_glr_plot_custom_model_thumb.png + :alt: Write your own converter for your own model - :ref:`sphx_glr_auto_examples_plot_custom_model.py` + :ref:`sphx_glr_auto_examples_plot_custom_model.py` .. raw:: html +
Write your own converter for your own model
-.. toctree:: - :hidden: - - /auto_examples/plot_custom_model .. raw:: html -
-
+
-.. only :: html +.. toctree:: + :hidden: - .. container:: sphx-glr-footer - :class: sphx-glr-footer-gallery + /auto_examples/plot_metadata + /auto_examples/plot_pipeline + /auto_examples/plot_backend + /auto_examples/plot_logging + /auto_examples/plot_convert_decision_function + /auto_examples/plot_convert_model + /auto_examples/plot_investigate_pipeline + /auto_examples/plot_benchmark_cdist + /auto_examples/plot_pipeline_lightgbm + /auto_examples/plot_convert_zipmap + /auto_examples/plot_black_op + /auto_examples/plot_benchmark_pipeline + /auto_examples/plot_pipeline_xgboost + /auto_examples/plot_nmf + /auto_examples/plot_cast_transformer + /auto_examples/plot_gpr + /auto_examples/plot_errors_onnxruntime + /auto_examples/plot_onnx_operators + /auto_examples/plot_convert_syntax + /auto_examples/plot_tfidfvectorizer + /auto_examples/plot_complex_pipeline + /auto_examples/plot_intermediate_outputs + /auto_examples/plot_custom_parser_alternative + /auto_examples/plot_custom_parser + /auto_examples/plot_custom_model - .. container:: sphx-glr-download sphx-glr-download-python +.. only:: html - :download:`Download all examples in Python source code: auto_examples_python.zip ` + .. container:: sphx-glr-footer sphx-glr-footer-gallery + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download all examples in Python source code: auto_examples_python.zip ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download all examples in Jupyter notebooks: auto_examples_jupyter.zip ` + :download:`Download all examples in Jupyter notebooks: auto_examples_jupyter.zip ` .. only:: html diff --git a/_sources/auto_examples/plot_backend.rst.txt b/_sources/auto_examples/plot_backend.rst.txt index 0749f9a09..3c2bc4e35 100644 --- a/_sources/auto_examples/plot_backend.rst.txt +++ b/_sources/auto_examples/plot_backend.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_backend.py" +.. "auto_examples/plot_backend.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -104,12 +104,10 @@ Let's use ONNX backend API to test it. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none label=[2 1 2] - probabilities=[{0: 0.0, 1: 0.0, 2: 1.0}, {0: 0.04736955463886261, 1: 0.9526304602622986, 2: 0.0}, {0: 0.0, 1: 0.0, 2: 1.0}] + probabilities=[{0: 0.0, 1: 0.0, 2: 1.0}, {0: 0.04301583021879196, 1: 0.9569841623306274, 2: 0.0}, {0: 0.0, 1: 0.0, 2: 1.0}] @@ -131,8 +129,6 @@ GPU or CPU. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none CPU @@ -165,12 +161,10 @@ without using *onnx*. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none label=[1 1 1] - probabilities=[{0: 0.04736955463886261, 1: 0.9526304602622986, 2: 0.0}, {0: 0.04736955463886261, 1: 0.9526304602622986, 2: 0.0}, {0: 0.04736955463886261, 1: 0.9526304602622986, 2: 0.0}] + probabilities=[{0: 0.04301583021879196, 1: 0.9569841623306274, 2: 0.0}, {0: 0.04301583021879196, 1: 0.9569841623306274, 2: 0.0}, {0: 0.04301583021879196, 1: 0.9569841623306274, 2: 0.0}] @@ -201,15 +195,13 @@ with the same API. .. rst-class:: sphx-glr-script-out - Out: - .. 
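The ``plot_backend`` page above exercises the ONNX backend API shipped with *onnxruntime*. A minimal sketch of that usage, assuming a previously converted model saved as ``logreg_iris.onnx`` with a single float input of width 4 (both the file name and the shape are illustrative, not taken from the page):

.. code-block:: python

    # Sketch only: run a converted model through onnxruntime's backend API.
    # "logreg_iris.onnx" and the 4-feature input are assumptions for illustration.
    import numpy as np
    import onnx
    import onnxruntime.backend as backend

    model = onnx.load("logreg_iris.onnx")
    if backend.supports_device("CPU"):
        rep = backend.prepare(model, "CPU")
        x = np.array([[5.1, 3.5, 1.4, 0.2]], dtype=np.float32)
        label, probabilities = rep.run(x)
        print("label=%r" % label)
        print("probabilities=%r" % probabilities)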
code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -217,35 +209,23 @@ with the same API. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.285 seconds) + **Total running time of the script:** ( 0 minutes 0.112 seconds) .. _sphx_glr_download_auto_examples_plot_backend.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_backend.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_backend.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_backend.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_backend.ipynb ` + :download:`Download Jupyter notebook: plot_backend.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_benchmark_cdist.rst.txt b/_sources/auto_examples/plot_benchmark_cdist.rst.txt index d4953fe23..c3d53abbb 100644 --- a/_sources/auto_examples/plot_benchmark_cdist.rst.txt +++ b/_sources/auto_examples/plot_benchmark_cdist.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_benchmark_cdist.py" +.. "auto_examples/plot_benchmark_cdist.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -27,9 +27,6 @@ The following example focuses on one particular operator, CDist and compares its execution time between *onnxruntime* and *scipy*. -.. contents:: - :local: - ONNX Graph with CDist +++++++++++++++++++++ @@ -37,7 +34,7 @@ ONNX Graph with CDist generated/scipy.spatial.distance.cdist.html>`_ function computes pairwise distances. -.. GENERATED FROM PYTHON SOURCE LINES 24-42 +.. GENERATED FROM PYTHON SOURCE LINES 21-39 .. code-block:: default @@ -65,8 +62,6 @@ function computes pairwise distances. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[2. 2. 2.] @@ -75,11 +70,11 @@ function computes pairwise distances. -.. GENERATED FROM PYTHON SOURCE LINES 43-44 +.. GENERATED FROM PYTHON SOURCE LINES 40-41 ONNX -.. GENERATED FROM PYTHON SOURCE LINES 44-52 +.. GENERATED FROM PYTHON SOURCE LINES 41-49 .. code-block:: default @@ -97,13 +92,11 @@ ONNX .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none ir_version: 8 producer_name: "skl2onnx" - producer_version: "1.11.2" + producer_version: "1.14.0" domain: "ai.onnx" model_version: 0 graph { @@ -169,7 +162,7 @@ ONNX -.. GENERATED FROM PYTHON SOURCE LINES 53-58 +.. GENERATED FROM PYTHON SOURCE LINES 50-55 CDist and onnxruntime +++++++++++++++++++++ @@ -177,12 +170,13 @@ CDist and onnxruntime We compute the output of CDist operator with onnxruntime. -.. 
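For reference, the *scipy* side of the CDist comparison above is a single call. A minimal sketch with toy arrays (the data is illustrative; only the result, every distance equal to 2, matches the output shown above):

.. code-block:: python

    # scipy reference for the CDist benchmark: pairwise distances between rows of X and Y.
    import numpy as np
    from scipy.spatial.distance import cdist

    X = np.ones((2, 4), dtype=np.float32)    # illustrative toy data
    Y = np.zeros((3, 4), dtype=np.float32)
    print(cdist(X, Y))                       # (2, 3) matrix, every entry is 2.0 here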
GENERATED FROM PYTHON SOURCE LINES 58-63 +.. GENERATED FROM PYTHON SOURCE LINES 55-61 .. code-block:: default - sess = InferenceSession(onx.SerializeToString()) + sess = InferenceSession(onx.SerializeToString(), + providers=["CPUExecutionProvider"]) res = sess.run(None, {'X': X, 'Y': Y}) print(res) @@ -192,8 +186,6 @@ with onnxruntime. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [array([[1.9999999, 1.9999999, 1.9999999], @@ -202,14 +194,14 @@ with onnxruntime. -.. GENERATED FROM PYTHON SOURCE LINES 64-68 +.. GENERATED FROM PYTHON SOURCE LINES 62-66 Benchmark +++++++++ Let's compare onnxruntime and scipy. -.. GENERATED FROM PYTHON SOURCE LINES 68-85 +.. GENERATED FROM PYTHON SOURCE LINES 66-83 .. code-block:: default @@ -237,11 +229,11 @@ Let's compare onnxruntime and scipy. -.. GENERATED FROM PYTHON SOURCE LINES 86-87 +.. GENERATED FROM PYTHON SOURCE LINES 84-85 scipy -.. GENERATED FROM PYTHON SOURCE LINES 87-94 +.. GENERATED FROM PYTHON SOURCE LINES 85-92 .. code-block:: default @@ -258,14 +250,12 @@ scipy .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - {'average': 8.913900000010243e-06, - 'deviation': 1.7710739086742629e-06, - 'max_exec': 1.3679999999993698e-05, - 'min_exec': 7.404999999849338e-06, + {'average': 6.455253499780156e-06, + 'deviation': 8.708922725775654e-07, + 'max_exec': 1.072999999678359e-05, + 'min_exec': 5.390000001170847e-06, 'name': 'scipy', 'ncols': 4, 'nrows': 2, @@ -275,11 +265,11 @@ scipy -.. GENERATED FROM PYTHON SOURCE LINES 95-96 +.. GENERATED FROM PYTHON SOURCE LINES 93-94 onnxruntime -.. GENERATED FROM PYTHON SOURCE LINES 96-102 +.. GENERATED FROM PYTHON SOURCE LINES 94-100 .. code-block:: default @@ -295,14 +285,12 @@ onnxruntime .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - {'average': 1.352294999997916e-05, - 'deviation': 3.856318263253598e-06, - 'max_exec': 2.9424999999960732e-05, - 'min_exec': 1.1984999999903323e-05, + {'average': 1.2687857000173608e-05, + 'deviation': 1.6693917723816003e-06, + 'max_exec': 1.7610000008971836e-05, + 'min_exec': 1.0040000006483751e-05, 'name': 'ort', 'ncols': 4, 'nrows': 2, @@ -312,11 +300,11 @@ onnxruntime -.. GENERATED FROM PYTHON SOURCE LINES 103-104 +.. GENERATED FROM PYTHON SOURCE LINES 101-102 Longer benchmark -.. GENERATED FROM PYTHON SOURCE LINES 104-128 +.. GENERATED FROM PYTHON SOURCE LINES 102-126 .. code-block:: default @@ -355,25 +343,23 @@ Longer benchmark .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - 0%| | 0/4 [00:00` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_benchmark_cdist.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_benchmark_cdist.ipynb ` + :download:`Download Jupyter notebook: plot_benchmark_cdist.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_benchmark_pipeline.rst.txt b/_sources/auto_examples/plot_benchmark_pipeline.rst.txt index 33e5abea7..f9bd60d1e 100644 --- a/_sources/auto_examples/plot_benchmark_pipeline.rst.txt +++ b/_sources/auto_examples/plot_benchmark_pipeline.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_benchmark_pipeline.py" +.. "auto_examples/plot_benchmark_pipeline.py" .. LINE NUMBERS ARE GIVEN BELOW. .. 
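A change repeated across every regenerated page in this diff is that ``InferenceSession`` now receives an explicit ``providers`` argument. A minimal sketch of the pattern, assuming ``onx`` is an ONNX model built earlier and ``X``, ``Y`` are numpy arrays matching the graph inputs:

.. code-block:: python

    # Recurring pattern in the updated examples: name the execution provider explicitly.
    from onnxruntime import InferenceSession

    sess = InferenceSession(onx.SerializeToString(),            # onx: an onnx.ModelProto built earlier
                            providers=["CPUExecutionProvider"])
    res = sess.run(None, {"X": X, "Y": Y})                      # input names as in the CDist graph above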
only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -24,9 +24,6 @@ Benchmark a pipeline The following example checks up on every step in a pipeline, compares and benchmarks the predictions. -.. contents:: - :local: - Create a pipeline +++++++++++++++++ @@ -39,7 +36,7 @@ docs/Operators-ml.md#ai.onnx.ml.Imputer>`_ does not handle string type. This cannot be part of the final ONNX pipeline and must be removed. Look for comment starting with ``---`` below. -.. GENERATED FROM PYTHON SOURCE LINES 26-54 +.. GENERATED FROM PYTHON SOURCE LINES 23-51 .. code-block:: default @@ -77,11 +74,9 @@ and must be removed. Look for comment starting with ``---`` below. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - D:\Program Files\Python\Python39\lib\site-packages\sklearn\linear_model\_logistic.py:444: ConvergenceWarning: lbfgs failed to converge (status=1): + /home/xadupre/github/scikit-learn/sklearn/linear_model/_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT. Increase the number of iterations (max_iter) or scale the data as shown in: @@ -99,12 +94,12 @@ and must be removed. Look for comment starting with ``---`` below.

-.. GENERATED FROM PYTHON SOURCE LINES 55-57 +.. GENERATED FROM PYTHON SOURCE LINES 52-54 Conversion to ONNX ++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 57-71 +.. GENERATED FROM PYTHON SOURCE LINES 54-69 .. code-block:: default @@ -114,7 +109,8 @@ Conversion to ONNX model_onnx = convert_sklearn(pipe, initial_types=initial_types, target_opset=12) - sess = rt.InferenceSession(model_onnx.SerializeToString()) + sess = rt.InferenceSession(model_onnx.SerializeToString(), + providers=["CPUExecutionProvider"]) print("skl predict_proba") print(pipe.predict_proba(X_digits[:2])) onx_pred = sess.run(None, {'input': X_digits[:2].astype(np.float32)})[1] @@ -128,34 +124,32 @@ Conversion to ONNX .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none skl predict_proba - [[9.99998536e-01 5.99063801e-19 3.48549300e-10 1.55765866e-08 - 3.32560027e-10 1.21314773e-06 3.98960248e-08 1.22513933e-07 - 2.23871298e-08 4.98148709e-08] - [1.47648539e-14 9.99999301e-01 1.05811971e-10 7.49298736e-13 - 2.48627484e-07 8.75686177e-12 5.39025200e-11 2.95899979e-11 - 4.50529114e-07 1.30607573e-13]] + [[9.99998536e-01 5.99063158e-19 3.48548953e-10 1.55765726e-08 + 3.32559745e-10 1.21314653e-06 3.98959930e-08 1.22513839e-07 + 2.23871272e-08 4.98148509e-08] + [1.47648437e-14 9.99999301e-01 1.05811967e-10 7.49298733e-13 + 2.48627417e-07 8.75686484e-12 5.39025135e-11 2.95899938e-11 + 4.50528833e-07 1.30607478e-13]] onnx predict_proba [[9.99998569e-01 5.99062501e-19 3.48550355e-10 1.55766493e-08 - 3.32561811e-10 1.21315361e-06 3.98961930e-08 1.22514706e-07 - 2.23872068e-08 4.98151529e-08] - [1.47648956e-14 9.99999285e-01 1.05811790e-10 7.49297488e-13 - 2.48627885e-07 8.75685548e-12 5.39024415e-11 2.95900075e-11 + 3.32561811e-10 1.21315134e-06 3.98961930e-08 1.22514706e-07 + 2.23872494e-08 4.98151529e-08] + [1.47648956e-14 9.99999285e-01 1.05811991e-10 7.49297488e-13 + 2.48627885e-07 8.75685548e-12 5.39024415e-11 2.95899520e-11 4.50529058e-07 1.30607344e-13]] -.. GENERATED FROM PYTHON SOURCE LINES 72-74 +.. GENERATED FROM PYTHON SOURCE LINES 70-72 Comparing outputs +++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 74-78 +.. GENERATED FROM PYTHON SOURCE LINES 72-76 .. code-block:: default @@ -170,12 +164,12 @@ Comparing outputs -.. GENERATED FROM PYTHON SOURCE LINES 79-81 +.. GENERATED FROM PYTHON SOURCE LINES 77-79 Benchmarks ++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 81-89 +.. GENERATED FROM PYTHON SOURCE LINES 79-87 .. code-block:: default @@ -193,19 +187,17 @@ Benchmarks .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none scikit-learn - 0.9949185999999983 + 2.355312850000246 onnxruntime - 0.2637227000000024 + 0.29348953099997743 -.. GENERATED FROM PYTHON SOURCE LINES 90-98 +.. GENERATED FROM PYTHON SOURCE LINES 88-96 Intermediate steps ++++++++++++++++++ @@ -216,7 +208,7 @@ is failing. The following method modifies the scikit-learn pipeline to steal the intermediate outputs and produces an smaller ONNX graph for every operator. -.. GENERATED FROM PYTHON SOURCE LINES 98-128 +.. GENERATED FROM PYTHON SOURCE LINES 96-127 .. code-block:: default @@ -231,7 +223,8 @@ an smaller ONNX graph for every operator. 
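The conversion step shown above boils down to declaring the input type and calling ``convert_sklearn``. A minimal sketch, assuming ``pipe`` is the fitted pipeline and ``X_digits`` the data used on this page:

.. code-block:: python

    # Sketch of the conversion pattern used on this page: declare the input type,
    # convert, then run the result with onnxruntime.
    import numpy as np
    import onnxruntime as rt
    from skl2onnx import convert_sklearn
    from skl2onnx.common.data_types import FloatTensorType

    initial_types = [("input", FloatTensorType([None, X_digits.shape[1]]))]
    model_onnx = convert_sklearn(pipe, initial_types=initial_types, target_opset=12)

    sess = rt.InferenceSession(model_onnx.SerializeToString(),
                               providers=["CPUExecutionProvider"])
    onx_pred = sess.run(None, {"input": X_digits[:2].astype(np.float32)})[1]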
for i, step in enumerate(steps): onnx_step = step['onnx_step'] - sess = rt.InferenceSession(onnx_step.SerializeToString()) + sess = rt.InferenceSession(onnx_step.SerializeToString(), + providers=["CPUExecutionProvider"]) onnx_outputs = sess.run(None, {'input': X_digits[:2].astype(np.float32)}) skl_outputs = step['model']._debug.outputs if 'transform' in skl_outputs: @@ -256,29 +249,27 @@ an smaller ONNX graph for every operator. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none benchmark scikit-learn - 0.3658737999999957 + 0.6831115730001329 onnxruntime - 0.17143320000000273 + 0.16402971700017588 benchmark scikit-learn - 0.6289949999999962 + 1.4586870539997108 onnxruntime - 0.23239949999999965 + 0.15432031699992876 -.. GENERATED FROM PYTHON SOURCE LINES 129-130 +.. GENERATED FROM PYTHON SOURCE LINES 128-129 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 130-136 +.. GENERATED FROM PYTHON SOURCE LINES 129-135 .. code-block:: default @@ -294,15 +285,13 @@ an smaller ONNX graph for every operator. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -310,35 +299,23 @@ an smaller ONNX graph for every operator. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 3.155 seconds) + **Total running time of the script:** ( 0 minutes 6.685 seconds) .. _sphx_glr_download_auto_examples_plot_benchmark_pipeline.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_benchmark_pipeline.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_benchmark_pipeline.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_benchmark_pipeline.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_benchmark_pipeline.ipynb ` + :download:`Download Jupyter notebook: plot_benchmark_pipeline.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_black_op.rst.txt b/_sources/auto_examples/plot_black_op.rst.txt index b72bf5bec..e6152b248 100644 --- a/_sources/auto_examples/plot_black_op.rst.txt +++ b/_sources/auto_examples/plot_black_op.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_black_op.py" +.. "auto_examples/plot_black_op.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -29,16 +29,13 @@ is missing from the list of available operators. Some converters may convert a model in different ways if the users wants to blacklist some operators. -.. 
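The per-step comparison above relies on a list of steps, each carrying the fitted sub-model and its own small ONNX graph. A hedged sketch of that loop, assuming ``steps`` was produced by the helper used earlier on this page (each entry exposing ``'onnx_step'`` and ``'model'`` as in the snippet above):

.. code-block:: python

    # Sketch: run every intermediate ONNX graph and compare it with the
    # corresponding scikit-learn step. `steps` is assumed to come from the
    # helper used earlier on this page.
    import numpy as np
    import onnxruntime as rt

    for step in steps:
        sess = rt.InferenceSession(step["onnx_step"].SerializeToString(),
                                   providers=["CPUExecutionProvider"])
        onnx_outputs = sess.run(None, {"input": X_digits[:2].astype(np.float32)})
        skl_outputs = step["model"]._debug.outputs
        print(type(onnx_outputs[0]), type(skl_outputs))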
contents:: - :local: - GaussianMixture +++++++++++++++ The first converter to change its behaviour depending on a black list of operators is for model *GaussianMixture*. -.. GENERATED FROM PYTHON SOURCE LINES 25-44 +.. GENERATED FROM PYTHON SOURCE LINES 22-41 .. code-block:: default @@ -74,12 +71,12 @@ of operators is for model *GaussianMixture*.

-.. GENERATED FROM PYTHON SOURCE LINES 45-47 +.. GENERATED FROM PYTHON SOURCE LINES 42-44 Default conversion ++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 47-59 +.. GENERATED FROM PYTHON SOURCE LINES 44-57 .. code-block:: default @@ -88,7 +85,8 @@ Default conversion model, X_train[:1].astype(np.float32), options={id(model): {'score_samples': True}}, target_opset=12) - sess = InferenceSession(model_onnx.SerializeToString()) + sess = InferenceSession(model_onnx.SerializeToString(), + providers=["CPUExecutionProvider"]) xt = X_test[:5].astype(np.float32) print(model.score_samples(xt)) @@ -101,25 +99,23 @@ Default conversion .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [-2.37307714 -1.90706021 -0.73694289 -3.386193 -3.92811531] - [[-2.3730776] - [-1.9070609] - [-0.736943 ] - [-3.3861916] - [-3.9281151]] + [-2.35262849 -1.77470989 -1.85001598 -3.50263433 -2.44689391] + [[-2.3526287] + [-1.7747092] + [-1.8500156] + [-3.5026336] + [-2.4468932]] -.. GENERATED FROM PYTHON SOURCE LINES 60-61 +.. GENERATED FROM PYTHON SOURCE LINES 58-59 Display the ONNX graph. -.. GENERATED FROM PYTHON SOURCE LINES 61-76 +.. GENERATED FROM PYTHON SOURCE LINES 59-74 .. code-block:: default @@ -149,16 +145,14 @@ Display the ONNX graph. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 4122.5, 8425.5, -0.5) + (-0.5, 5287.5, 8425.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 77-83 +.. GENERATED FROM PYTHON SOURCE LINES 75-81 Conversion without ReduceLogSumExp ++++++++++++++++++++++++++++++++++ @@ -167,7 +161,7 @@ Parameter *black_op* is used to tell the converter not to use this operator. Let's see what the converter produces in that case. -.. GENERATED FROM PYTHON SOURCE LINES 83-95 +.. GENERATED FROM PYTHON SOURCE LINES 81-94 .. code-block:: default @@ -177,7 +171,8 @@ produces in that case. options={id(model): {'score_samples': True}}, black_op={'ReduceLogSumExp'}, target_opset=12) - sess2 = InferenceSession(model_onnx2.SerializeToString()) + sess2 = InferenceSession(model_onnx2.SerializeToString(), + providers=["CPUExecutionProvider"]) xt = X_test[:5].astype(np.float32) print(model.score_samples(xt)) @@ -189,25 +184,23 @@ produces in that case. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [-2.37307714 -1.90706021 -0.73694289 -3.386193 -3.92811531] - [[-2.3730776] - [-1.9070609] - [-0.736943 ] - [-3.3861916] - [-3.9281156]] + [-2.35262849 -1.77470989 -1.85001598 -3.50263433 -2.44689391] + [[-2.3526287] + [-1.7747092] + [-1.8500156] + [-3.5026336] + [-2.4468932]] -.. GENERATED FROM PYTHON SOURCE LINES 96-97 +.. GENERATED FROM PYTHON SOURCE LINES 95-96 Display the ONNX graph. -.. GENERATED FROM PYTHON SOURCE LINES 97-112 +.. GENERATED FROM PYTHON SOURCE LINES 96-111 .. code-block:: default @@ -237,21 +230,19 @@ Display the ONNX graph. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 4305.5, 13264.5, -0.5) + (-0.5, 4921.5, 13264.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 113-115 +.. GENERATED FROM PYTHON SOURCE LINES 112-114 Processing time +++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 115-122 +.. GENERATED FROM PYTHON SOURCE LINES 114-121 .. code-block:: default @@ -268,21 +259,19 @@ Processing time .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - 0.3067610000000016 - 0.4050269999999969 + 0.4812870999999177 + 0.8075209660000837 -.. GENERATED FROM PYTHON SOURCE LINES 123-124 +.. GENERATED FROM PYTHON SOURCE LINES 122-123 The model using ReduceLogSumExp is much faster. -.. 
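The two conversions compared above differ only by the ``black_op`` argument. A minimal sketch of both calls, assuming ``to_onnx`` is the entry point and ``model`` / ``X_train`` are the fitted ``GaussianMixture`` and its training data from this page:

.. code-block:: python

    # Sketch: default conversion versus a conversion with ReduceLogSumExp black-listed.
    # Using to_onnx as the entry point is an assumption consistent with the calls above.
    import numpy as np
    from skl2onnx import to_onnx

    sample = X_train[:1].astype(np.float32)
    opts = {id(model): {"score_samples": True}}

    model_onnx = to_onnx(model, sample, options=opts, target_opset=12)
    model_onnx2 = to_onnx(model, sample, options=opts,
                          black_op={"ReduceLogSumExp"}, target_opset=12)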
GENERATED FROM PYTHON SOURCE LINES 126-133 +.. GENERATED FROM PYTHON SOURCE LINES 125-132 If the converter cannot convert without... ++++++++++++++++++++++++++++++++++++++++++ @@ -292,7 +281,7 @@ of operators. If a converter fails to convert without using a blacklisted operator (or only whitelisted operators), *skl2onnx* raises an error. -.. GENERATED FROM PYTHON SOURCE LINES 133-144 +.. GENERATED FROM PYTHON SOURCE LINES 132-143 .. code-block:: default @@ -313,8 +302,6 @@ a blacklisted operator (or only whitelisted operators), .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Error: Operator 'Add' is black listed. @@ -322,11 +309,11 @@ a blacklisted operator (or only whitelisted operators), -.. GENERATED FROM PYTHON SOURCE LINES 145-146 +.. GENERATED FROM PYTHON SOURCE LINES 144-145 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 146-154 +.. GENERATED FROM PYTHON SOURCE LINES 145-153 .. code-block:: default @@ -344,15 +331,13 @@ a blacklisted operator (or only whitelisted operators), .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -360,35 +345,23 @@ a blacklisted operator (or only whitelisted operators), .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 11.663 seconds) + **Total running time of the script:** ( 0 minutes 21.877 seconds) .. _sphx_glr_download_auto_examples_plot_black_op.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_black_op.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_black_op.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_black_op.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_black_op.ipynb ` + :download:`Download Jupyter notebook: plot_black_op.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_cast_transformer.rst.txt b/_sources/auto_examples/plot_cast_transformer.rst.txt index cbe786acb..ad6e27de1 100644 --- a/_sources/auto_examples/plot_cast_transformer.rst.txt +++ b/_sources/auto_examples/plot_cast_transformer.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_cast_transformer.py" +.. "auto_examples/plot_cast_transformer.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -37,9 +37,6 @@ a decision tree. One small difference and the decision follows another path in the tree. Let's see how to solve that issue. -.. 
contents:: - :local: - An example with fails +++++++++++++++++++++ @@ -47,7 +44,7 @@ This is not a typical example, it is build to make it fails based on the assumption ``(x / y)`` is usually different from ``x * ( 1 / y)`` on a computer. -.. GENERATED FROM PYTHON SOURCE LINES 34-52 +.. GENERATED FROM PYTHON SOURCE LINES 31-49 .. code-block:: default @@ -76,11 +73,11 @@ based on the assumption ``(x / y)`` is usually different from -.. GENERATED FROM PYTHON SOURCE LINES 53-54 +.. GENERATED FROM PYTHON SOURCE LINES 50-51 The weird data. -.. GENERATED FROM PYTHON SOURCE LINES 54-68 +.. GENERATED FROM PYTHON SOURCE LINES 51-65 .. code-block:: default @@ -105,11 +102,11 @@ The weird data. -.. GENERATED FROM PYTHON SOURCE LINES 69-70 +.. GENERATED FROM PYTHON SOURCE LINES 66-67 A simple model. -.. GENERATED FROM PYTHON SOURCE LINES 70-78 +.. GENERATED FROM PYTHON SOURCE LINES 67-75 .. code-block:: default @@ -128,16 +125,18 @@ A simple model. -.. GENERATED FROM PYTHON SOURCE LINES 79-80 +.. GENERATED FROM PYTHON SOURCE LINES 76-77 Conversion into ONNX. -.. GENERATED FROM PYTHON SOURCE LINES 80-83 +.. GENERATED FROM PYTHON SOURCE LINES 77-82 .. code-block:: default - onx1 = to_onnx(model1, X_train[:1].astype(np.float32)) - sess1 = InferenceSession(onx1.SerializeToString()) + onx1 = to_onnx(model1, X_train[:1].astype(np.float32), + target_opset=15) + sess1 = InferenceSession(onx1.SerializeToString(), + providers=["CPUExecutionProvider"]) @@ -146,11 +145,11 @@ Conversion into ONNX. -.. GENERATED FROM PYTHON SOURCE LINES 84-85 +.. GENERATED FROM PYTHON SOURCE LINES 83-84 And the maximum difference. -.. GENERATED FROM PYTHON SOURCE LINES 85-96 +.. GENERATED FROM PYTHON SOURCE LINES 84-95 .. code-block:: default @@ -171,8 +170,6 @@ And the maximum difference. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none 322.39065126389346 @@ -180,11 +177,11 @@ And the maximum difference. -.. GENERATED FROM PYTHON SOURCE LINES 97-98 +.. GENERATED FROM PYTHON SOURCE LINES 96-97 The graph. -.. GENERATED FROM PYTHON SOURCE LINES 98-112 +.. GENERATED FROM PYTHON SOURCE LINES 97-111 .. code-block:: default @@ -213,16 +210,14 @@ The graph. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 2007.5, 1707.5, -0.5) + (-0.5, 2536.5, 1707.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 113-127 +.. GENERATED FROM PYTHON SOURCE LINES 112-126 New pipeline ++++++++++++ @@ -239,7 +234,7 @@ change the conversion (remove node Scaler by using option `'div'`) and to use double by inserting an explicit Cast. -.. GENERATED FROM PYTHON SOURCE LINES 127-147 +.. GENERATED FROM PYTHON SOURCE LINES 126-148 .. code-block:: default @@ -255,9 +250,11 @@ Cast. exp2 = model2.predict(Xi_test) onx2 = to_onnx(model2, X_train[:1].astype(np.float32), - options={StandardScaler: {'div': 'div_cast'}}) + options={StandardScaler: {'div': 'div_cast'}}, + target_opset=15) - sess2 = InferenceSession(onx2.SerializeToString()) + sess2 = InferenceSession(onx2.SerializeToString(), + providers=["CPUExecutionProvider"]) got2 = sess2.run(None, {'X': Xi_test})[0] md2 = maxdiff(exp2, got2) @@ -269,20 +266,18 @@ Cast. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - 2.9884569130445016e-05 + 2.9884569016758178e-05 -.. GENERATED FROM PYTHON SOURCE LINES 148-149 +.. GENERATED FROM PYTHON SOURCE LINES 149-150 The graph. -.. GENERATED FROM PYTHON SOURCE LINES 149-163 +.. GENERATED FROM PYTHON SOURCE LINES 150-164 .. code-block:: default @@ -311,20 +306,18 @@ The graph. .. rst-class:: sphx-glr-script-out - Out: - .. 
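The fix on this page combines an explicit cast with the ``div_cast`` option of the ``StandardScaler`` converter. A minimal sketch of the conversion call, assuming ``model2`` is the second pipeline trained above:

.. code-block:: python

    # Sketch of the conversion used for the corrected pipeline: the StandardScaler
    # converter keeps the division (div_cast) instead of multiplying by 1/scale.
    import numpy as np
    from sklearn.preprocessing import StandardScaler
    from skl2onnx import to_onnx

    onx2 = to_onnx(model2, X_train[:1].astype(np.float32),
                   options={StandardScaler: {"div": "div_cast"}},
                   target_opset=15)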
code-block:: none - (-0.5, 2007.5, 4171.5, -0.5) + (-0.5, 2536.5, 4171.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 164-165 +.. GENERATED FROM PYTHON SOURCE LINES 165-166 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 165-173 +.. GENERATED FROM PYTHON SOURCE LINES 166-174 .. code-block:: default @@ -342,15 +335,13 @@ The graph. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -358,35 +349,23 @@ The graph. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 2.944 seconds) + **Total running time of the script:** ( 0 minutes 3.795 seconds) .. _sphx_glr_download_auto_examples_plot_cast_transformer.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_cast_transformer.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_cast_transformer.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_cast_transformer.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_cast_transformer.ipynb ` + :download:`Download Jupyter notebook: plot_cast_transformer.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_complex_pipeline.rst.txt b/_sources/auto_examples/plot_complex_pipeline.rst.txt index 70d0d0dcf..4e8ae86be 100644 --- a/_sources/auto_examples/plot_complex_pipeline.rst.txt +++ b/_sources/auto_examples/plot_complex_pipeline.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_complex_pipeline.py" +.. "auto_examples/plot_complex_pipeline.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -32,9 +32,6 @@ column may be preprocessed with a different transformer. :ref:`l-complex-pipeline`. -.. contents:: - :local: - Create and train a complex pipeline +++++++++++++++++++++++++++++++++++ @@ -48,7 +45,7 @@ Operators-ml.md#ai.onnx.ml.Imputer>`_ does not handle string type. This cannot be part of the final ONNX pipeline and must be removed. Look for comment starting with ``---`` below. -.. GENERATED FROM PYTHON SOURCE LINES 35-94 +.. GENERATED FROM PYTHON SOURCE LINES 32-91 .. code-block:: default @@ -117,8 +114,6 @@ and must be removed. Look for comment starting with ``---`` below. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none pclass int64 @@ -178,7 +173,7 @@ and must be removed. Look for comment starting with ``---`` below.
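The complex pipeline converted below follows the usual ColumnTransformer layout: numerical columns are imputed and scaled, categorical columns are one-hot encoded, and a classifier sits at the end. A hedged sketch of that structure (column names and estimator choices are illustrative, not a verbatim copy of the page's code):

.. code-block:: python

    # Illustrative sketch of a ColumnTransformer pipeline of the kind converted below.
    # Column names and estimators are assumptions, not the exact Titanic pipeline.
    from sklearn.compose import ColumnTransformer
    from sklearn.impute import SimpleImputer
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import OneHotEncoder, StandardScaler

    numeric_features = ["age", "fare"]
    categorical_features = ["embarked", "sex", "pclass"]

    preprocessor = ColumnTransformer(transformers=[
        ("num", Pipeline([("imputer", SimpleImputer(strategy="median")),
                          ("scaler", StandardScaler())]), numeric_features),
        ("cat", OneHotEncoder(handle_unknown="ignore"), categorical_features),
    ])
    clf = Pipeline([("preprocessor", preprocessor),
                    ("classifier", LogisticRegression(solver="lbfgs"))])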

-.. GENERATED FROM PYTHON SOURCE LINES 95-101 +.. GENERATED FROM PYTHON SOURCE LINES 92-98 Define the inputs of the ONNX graph +++++++++++++++++++++++++++++++++++ @@ -187,7 +182,7 @@ Define the inputs of the ONNX graph but it needs to know which feature has which name. We simply reuse the dataframe column definition. -.. GENERATED FROM PYTHON SOURCE LINES 101-103 +.. GENERATED FROM PYTHON SOURCE LINES 98-100 .. code-block:: default @@ -199,8 +194,6 @@ We simply reuse the dataframe column definition. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none pclass int64 @@ -221,11 +214,11 @@ We simply reuse the dataframe column definition. -.. GENERATED FROM PYTHON SOURCE LINES 104-105 +.. GENERATED FROM PYTHON SOURCE LINES 101-102 After conversion. -.. GENERATED FROM PYTHON SOURCE LINES 105-126 +.. GENERATED FROM PYTHON SOURCE LINES 102-123 .. code-block:: default @@ -256,8 +249,6 @@ After conversion. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [('pclass', Int64TensorType(shape=[None, 1])), @@ -277,18 +268,18 @@ After conversion. -.. GENERATED FROM PYTHON SOURCE LINES 127-130 +.. GENERATED FROM PYTHON SOURCE LINES 124-127 Merging single column into vectors is not the most efficient way to compute the prediction. It could be done before converting the pipeline into a graph. -.. GENERATED FROM PYTHON SOURCE LINES 132-134 +.. GENERATED FROM PYTHON SOURCE LINES 129-131 Convert the pipeline into ONNX ++++++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 134-141 +.. GENERATED FROM PYTHON SOURCE LINES 131-138 .. code-block:: default @@ -306,13 +297,13 @@ Convert the pipeline into ONNX -.. GENERATED FROM PYTHON SOURCE LINES 142-145 +.. GENERATED FROM PYTHON SOURCE LINES 139-142 Predictions are more efficient if the graph is small. That's why the converter checks that there is no unused input. They need to be removed from the graph inputs. -.. GENERATED FROM PYTHON SOURCE LINES 145-155 +.. GENERATED FROM PYTHON SOURCE LINES 142-152 .. code-block:: default @@ -333,13 +324,13 @@ They need to be removed from the graph inputs. -.. GENERATED FROM PYTHON SOURCE LINES 156-159 +.. GENERATED FROM PYTHON SOURCE LINES 153-156 *scikit-learn* does implicit conversions when it can. *sklearn-onnx* does not. The ONNX version of *OneHotEncoder* must be applied on columns of the same type. -.. GENERATED FROM PYTHON SOURCE LINES 159-170 +.. GENERATED FROM PYTHON SOURCE LINES 156-167 .. code-block:: default @@ -361,7 +352,7 @@ must be applied on columns of the same type. -.. GENERATED FROM PYTHON SOURCE LINES 171-177 +.. GENERATED FROM PYTHON SOURCE LINES 168-174 Compare the predictions +++++++++++++++++++++++ @@ -370,7 +361,7 @@ Final step, we need to ensure the converted model produces the same predictions, labels and probabilities. Let's start with *scikit-learn*. -.. GENERATED FROM PYTHON SOURCE LINES 177-181 +.. GENERATED FROM PYTHON SOURCE LINES 174-178 .. code-block:: default @@ -384,18 +375,16 @@ Let's start with *scikit-learn*. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [1 0 0 0 0] - predict_proba [[0.38641055 0.61358945] - [0.91507927 0.08492073]] + predict [0 0 0 1 1] + predict_proba [[0.79113857 0.20886143] + [0.90867425 0.09132575]] -.. GENERATED FROM PYTHON SOURCE LINES 182-191 +.. GENERATED FROM PYTHON SOURCE LINES 179-188 Predictions with onnxruntime. 
We need to remove the dropped columns and to change @@ -407,7 +396,7 @@ Last detail, every column was described not really as a vector but as a matrix of one column which explains the last line with the *reshape*. -.. GENERATED FROM PYTHON SOURCE LINES 191-199 +.. GENERATED FROM PYTHON SOURCE LINES 188-196 .. code-block:: default @@ -426,16 +415,17 @@ with the *reshape*. -.. GENERATED FROM PYTHON SOURCE LINES 200-201 +.. GENERATED FROM PYTHON SOURCE LINES 197-198 We are ready to run *onnxruntime*. -.. GENERATED FROM PYTHON SOURCE LINES 201-207 +.. GENERATED FROM PYTHON SOURCE LINES 198-205 .. code-block:: default - sess = rt.InferenceSession("pipeline_titanic.onnx") + sess = rt.InferenceSession("pipeline_titanic.onnx", + providers=["CPUExecutionProvider"]) pred_onx = sess.run(None, inputs) print("predict", pred_onx[0][:5]) print("predict_proba", pred_onx[1][:2]) @@ -446,23 +436,21 @@ We are ready to run *onnxruntime*. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [1 0 0 0 0] - predict_proba [{0: 0.38641056418418884, 1: 0.6135894060134888}, {0: 0.9150792956352234, 1: 0.08492070436477661}] + predict [0 0 0 1 1] + predict_proba [{0: 0.7911385297775269, 1: 0.20886144042015076}, {0: 0.9086743593215942, 1: 0.09132567048072815}] -.. GENERATED FROM PYTHON SOURCE LINES 208-211 +.. GENERATED FROM PYTHON SOURCE LINES 206-209 The output of onnxruntime is a list of dictionaries. Let's swith to an array but that requires to convert again with an additional option zipmap. -.. GENERATED FROM PYTHON SOURCE LINES 211-223 +.. GENERATED FROM PYTHON SOURCE LINES 209-222 .. code-block:: default @@ -473,7 +461,8 @@ an additional option zipmap. with open("pipeline_titanic_nozipmap.onnx", "wb") as f: f.write(model_onnx.SerializeToString()) - sess = rt.InferenceSession("pipeline_titanic_nozipmap.onnx") + sess = rt.InferenceSession("pipeline_titanic_nozipmap.onnx", + providers=["CPUExecutionProvider"]) pred_onx = sess.run(None, inputs) print("predict", pred_onx[0][:5]) print("predict_proba", pred_onx[1][:2]) @@ -484,22 +473,20 @@ an additional option zipmap. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [1 0 0 0 0] - predict_proba [[0.38641056 0.6135894 ] - [0.9150793 0.0849207 ]] + predict [0 0 0 1 1] + predict_proba [[0.7911385 0.20886144] + [0.90867436 0.09132567]] -.. GENERATED FROM PYTHON SOURCE LINES 224-225 +.. GENERATED FROM PYTHON SOURCE LINES 223-224 Let's check they are the same. -.. GENERATED FROM PYTHON SOURCE LINES 225-227 +.. GENERATED FROM PYTHON SOURCE LINES 224-226 .. code-block:: default @@ -512,7 +499,7 @@ Let's check they are the same. -.. GENERATED FROM PYTHON SOURCE LINES 228-234 +.. GENERATED FROM PYTHON SOURCE LINES 227-233 .. _l-plot-complex-pipeline-graph: @@ -521,7 +508,7 @@ Display the ONNX graph Finally, let's see the graph converted with *sklearn-onnx*. -.. GENERATED FROM PYTHON SOURCE LINES 234-250 +.. GENERATED FROM PYTHON SOURCE LINES 233-249 .. code-block:: default @@ -552,20 +539,18 @@ Finally, let's see the graph converted with *sklearn-onnx*. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 6024.5, 6812.5, -0.5) + (-0.5, 6901.5, 6049.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 251-252 +.. GENERATED FROM PYTHON SOURCE LINES 250-251 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 252-258 +.. GENERATED FROM PYTHON SOURCE LINES 251-257 .. code-block:: default @@ -581,15 +566,13 @@ Finally, let's see the graph converted with *sklearn-onnx*. .. 
rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -597,35 +580,23 @@ Finally, let's see the graph converted with *sklearn-onnx*. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 6.075 seconds) + **Total running time of the script:** ( 0 minutes 5.573 seconds) .. _sphx_glr_download_auto_examples_plot_complex_pipeline.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_complex_pipeline.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_complex_pipeline.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_complex_pipeline.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_complex_pipeline.ipynb ` + :download:`Download Jupyter notebook: plot_complex_pipeline.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_convert_decision_function.rst.txt b/_sources/auto_examples/plot_convert_decision_function.rst.txt index 9fe0bac5f..9da8a14ce 100644 --- a/_sources/auto_examples/plot_convert_decision_function.rst.txt +++ b/_sources/auto_examples/plot_convert_decision_function.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_convert_decision_function.py" +.. "auto_examples/plot_convert_decision_function.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -31,13 +31,10 @@ the method *decision_function*. Option ``'raw_scores'`` is used to change the default behaviour. Let's see that on a simple example. -.. contents:: - :local: - Train a model and convert it ++++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 25-47 +.. GENERATED FROM PYTHON SOURCE LINES 22-44 .. code-block:: default @@ -69,8 +66,6 @@ Train a model and convert it .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none LogisticRegression(max_iter=500) @@ -78,7 +73,7 @@ Train a model and convert it -.. GENERATED FROM PYTHON SOURCE LINES 48-53 +.. GENERATED FROM PYTHON SOURCE LINES 45-50 Output type +++++++++++ @@ -86,12 +81,13 @@ Output type Let's confirm the output type of the probabilities is a list of dictionaries with onnxruntime. -.. GENERATED FROM PYTHON SOURCE LINES 53-59 +.. GENERATED FROM PYTHON SOURCE LINES 50-57 .. 
code-block:: default - sess = rt.InferenceSession(onx.SerializeToString()) + sess = rt.InferenceSession(onx.SerializeToString(), + providers=["CPUExecutionProvider"]) res = sess.run(None, {'float_input': X_test.astype(numpy.float32)}) print("skl", clr.predict_proba(X_test[:1])) print("onnx", res[1][:2]) @@ -102,23 +98,21 @@ is a list of dictionaries with onnxruntime. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - skl [[9.84878200e-01 1.51217635e-02 3.66896809e-08]] - onnx [{0: 0.9848782420158386, 1: 0.015121763572096825, 2: 3.668966286340947e-08}, {0: 0.030543575063347816, 1: 0.912711501121521, 2: 0.056744903326034546}] + skl [[9.94895805e-01 5.10418332e-03 1.21479166e-08]] + onnx [{0: 0.9948958158493042, 1: 0.005104185082018375, 2: 1.2147937766826544e-08}, {0: 0.029782379046082497, 1: 0.9085215926170349, 2: 0.061696045100688934}] -.. GENERATED FROM PYTHON SOURCE LINES 60-63 +.. GENERATED FROM PYTHON SOURCE LINES 58-61 Raw scores and decision_function ++++++++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 63-74 +.. GENERATED FROM PYTHON SOURCE LINES 61-73 .. code-block:: default @@ -128,7 +122,8 @@ Raw scores and decision_function onx2 = convert_sklearn(clr, initial_types=initial_type, options=options, target_opset=12) - sess2 = rt.InferenceSession(onx2.SerializeToString()) + sess2 = rt.InferenceSession(onx2.SerializeToString(), + providers=["CPUExecutionProvider"]) res2 = sess2.run(None, {'float_input': X_test.astype(numpy.float32)}) print("skl", clr.decision_function(X_test[:1])) print("onnx", res2[1][:2]) @@ -139,21 +134,19 @@ Raw scores and decision_function .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - skl [[ 7.09397199 2.91758901 -10.011561 ]] - onnx [{0: 7.093972206115723, 1: 2.9175891876220703, 2: -10.011560440063477}, {0: -1.3388919830322266, 1: 2.05837345123291, 2: -0.7194805145263672}] + skl [[ 7.83118948 2.55861193 -10.38980141]] + onnx [{0: 7.831189155578613, 1: 2.558612108230591, 2: -10.389801025390625}, {0: -1.3820686340332031, 1: 2.0358331203460693, 2: -0.6537656784057617}] -.. GENERATED FROM PYTHON SOURCE LINES 75-76 +.. GENERATED FROM PYTHON SOURCE LINES 74-75 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 76-82 +.. GENERATED FROM PYTHON SOURCE LINES 75-81 .. code-block:: default @@ -169,15 +162,13 @@ Raw scores and decision_function .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -185,35 +176,23 @@ Raw scores and decision_function .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.067 seconds) + **Total running time of the script:** ( 0 minutes 0.042 seconds) .. _sphx_glr_download_auto_examples_plot_convert_decision_function.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_convert_decision_function.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_convert_decision_function.py ` + .. 
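The ``raw_scores`` option drives the second conversion below. A minimal sketch of the option dictionary, assuming ``clr`` and ``initial_type`` are the classifier and input declaration defined earlier on this page (the exact option value is an assumption consistent with the page's description):

.. code-block:: python

    # Sketch: ask the converter for raw decision_function scores instead of probabilities.
    # The option value {'raw_scores': True} is an assumption matching the page's text.
    import onnxruntime as rt
    from skl2onnx import convert_sklearn

    options = {id(clr): {"raw_scores": True}}
    onx2 = convert_sklearn(clr, initial_types=initial_type,
                           options=options, target_opset=12)
    sess2 = rt.InferenceSession(onx2.SerializeToString(),
                                providers=["CPUExecutionProvider"])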
container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_convert_decision_function.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_convert_decision_function.ipynb ` + :download:`Download Jupyter notebook: plot_convert_decision_function.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_convert_model.rst.txt b/_sources/auto_examples/plot_convert_model.rst.txt index c4ed482e4..ee0b744ed 100644 --- a/_sources/auto_examples/plot_convert_model.rst.txt +++ b/_sources/auto_examples/plot_convert_model.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_convert_model.py" +.. "auto_examples/plot_convert_model.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -30,16 +30,13 @@ three following steps: * convert it into *ONNX* with *sklearn-onnx*, * predict with *onnxruntime*. -.. contents:: - :local: - Train a model +++++++++++++ A very basic example using random forest and the iris dataset. -.. GENERATED FROM PYTHON SOURCE LINES 26-45 +.. GENERATED FROM PYTHON SOURCE LINES 23-42 .. code-block:: default @@ -68,8 +65,6 @@ the iris dataset. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none RandomForestClassifier() @@ -77,12 +72,12 @@ the iris dataset. -.. GENERATED FROM PYTHON SOURCE LINES 46-48 +.. GENERATED FROM PYTHON SOURCE LINES 43-45 Convert a model into ONNX +++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 48-56 +.. GENERATED FROM PYTHON SOURCE LINES 45-53 .. code-block:: default @@ -101,16 +96,16 @@ Convert a model into ONNX -.. GENERATED FROM PYTHON SOURCE LINES 57-59 +.. GENERATED FROM PYTHON SOURCE LINES 54-56 Compute the prediction with ONNX Runtime ++++++++++++++++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 59-66 +.. GENERATED FROM PYTHON SOURCE LINES 56-63 .. code-block:: default - sess = rt.InferenceSession("rf_iris.onnx") + sess = rt.InferenceSession("rf_iris.onnx", providers=["CPUExecutionProvider"]) input_name = sess.get_inputs()[0].name label_name = sess.get_outputs()[0].name pred_onx = sess.run( @@ -123,21 +118,19 @@ Compute the prediction with ONNX Runtime .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [0 2 2 0 1 0 2 1 2 2 0 0 0 2 0 0 2 2 2 0 0 0 0 0 1 0 2 1 1 2 2 0 1 1 0 0 0 - 1] + [0 2 1 0 0 2 0 0 1 0 1 0 0 0 1 1 1 0 1 0 1 1 0 2 0 2 2 2 0 1 2 1 2 0 1 2 1 + 2] -.. GENERATED FROM PYTHON SOURCE LINES 67-68 +.. GENERATED FROM PYTHON SOURCE LINES 64-65 Full example with a logistic regression -.. GENERATED FROM PYTHON SOURCE LINES 68-85 +.. GENERATED FROM PYTHON SOURCE LINES 65-82 .. code-block:: default @@ -164,11 +157,9 @@ Full example with a logistic regression .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - D:\Program Files\Python\Python39\lib\site-packages\sklearn\linear_model\_logistic.py:444: ConvergenceWarning: lbfgs failed to converge (status=1): + /home/xadupre/github/scikit-learn/sklearn/linear_model/_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT. 
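The prediction step on the ``plot_convert_model`` page reduces to loading the saved file and feeding a float32 array. A minimal sketch, assuming ``rf_iris.onnx`` was written by the conversion step above and ``X_test`` holds the iris test features:

.. code-block:: python

    # Sketch of the prediction step: open the saved model and run the test set.
    import numpy as np
    import onnxruntime as rt

    sess = rt.InferenceSession("rf_iris.onnx", providers=["CPUExecutionProvider"])
    input_name = sess.get_inputs()[0].name
    label_name = sess.get_outputs()[0].name
    pred_onx = sess.run([label_name],
                        {input_name: X_test.astype(np.float32)})[0]
    print(pred_onx)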
Increase the number of iterations (max_iter) or scale the data as shown in: @@ -176,17 +167,17 @@ Full example with a logistic regression Please also refer to the documentation for alternative solver options: https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression n_iter_i = _check_optimize_result( - [0 2 2 0 1 0 1 1 2 2 0 0 0 2 0 0 2 2 2 0 0 0 0 0 1 0 2 1 1 1 2 0 1 1 0 0 0 - 1] + [0 2 1 0 0 2 0 0 1 0 1 0 0 0 1 1 1 0 1 0 1 1 0 2 0 2 2 2 0 1 2 1 2 0 1 2 1 + 2] -.. GENERATED FROM PYTHON SOURCE LINES 86-87 +.. GENERATED FROM PYTHON SOURCE LINES 83-84 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 87-93 +.. GENERATED FROM PYTHON SOURCE LINES 84-90 .. code-block:: default @@ -202,15 +193,13 @@ Full example with a logistic regression .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -218,35 +207,23 @@ Full example with a logistic regression .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.244 seconds) + **Total running time of the script:** ( 0 minutes 0.182 seconds) .. _sphx_glr_download_auto_examples_plot_convert_model.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_convert_model.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_convert_model.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_convert_model.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_convert_model.ipynb ` + :download:`Download Jupyter notebook: plot_convert_model.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_convert_syntax.rst.txt b/_sources/auto_examples/plot_convert_syntax.rst.txt index 519428954..f9e3e332b 100644 --- a/_sources/auto_examples/plot_convert_syntax.rst.txt +++ b/_sources/auto_examples/plot_convert_syntax.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_convert_syntax.py" +.. "auto_examples/plot_convert_syntax.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -26,16 +26,13 @@ Different ways to convert a model This example leverages some code added to implement custom converters in an easy way. -.. contents:: - :local: - Predict with onnxruntime ++++++++++++++++++++++++ Simple function to check the converted model works fine. -.. GENERATED FROM PYTHON SOURCE LINES 22-42 +.. GENERATED FROM PYTHON SOURCE LINES 19-39 .. code-block:: default @@ -66,14 +63,14 @@ works fine. -.. GENERATED FROM PYTHON SOURCE LINES 43-47 +.. 
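Every page above ends with the same version report. A minimal sketch of the snippet that produces it:

.. code-block:: python

    # The version report printed at the end of each example page.
    import numpy, sklearn, onnx, onnxruntime, skl2onnx

    print("numpy:", numpy.__version__)
    print("scikit-learn:", sklearn.__version__)
    print("onnx:", onnx.__version__)
    print("onnxruntime:", onnxruntime.__version__)
    print("skl2onnx:", skl2onnx.__version__)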
GENERATED FROM PYTHON SOURCE LINES 40-44 Simple KMeans +++++++++++++ The first way: :func:`convert_sklearn`. -.. GENERATED FROM PYTHON SOURCE LINES 47-58 +.. GENERATED FROM PYTHON SOURCE LINES 44-55 .. code-block:: default @@ -94,8 +91,6 @@ The first way: :func:`convert_sklearn`. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [1 1 1 1 1 0 0 0 0 0] @@ -103,12 +98,12 @@ The first way: :func:`convert_sklearn`. -.. GENERATED FROM PYTHON SOURCE LINES 59-61 +.. GENERATED FROM PYTHON SOURCE LINES 56-58 The second way: :func:`to_onnx`: no need to play with :class:`FloatTensorType` anymore. -.. GENERATED FROM PYTHON SOURCE LINES 61-70 +.. GENERATED FROM PYTHON SOURCE LINES 58-67 .. code-block:: default @@ -127,22 +122,20 @@ The second way: :func:`to_onnx`: no need to play with .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [0 0 0 0 0 1 1 1 1 1] + [1 1 1 1 1 0 0 0 0 0] -.. GENERATED FROM PYTHON SOURCE LINES 71-74 +.. GENERATED FROM PYTHON SOURCE LINES 68-71 The third way: :func:`wrap_as_onnx_mixin`: wraps the machine learned model into a new class inheriting from :class:`OnnxOperatorMixin`. -.. GENERATED FROM PYTHON SOURCE LINES 74-84 +.. GENERATED FROM PYTHON SOURCE LINES 71-81 .. code-block:: default @@ -162,21 +155,19 @@ inheriting from :class:`OnnxOperatorMixin`. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [1 1 1 1 1 0 0 0 0 0] + [0 0 0 0 0 1 1 1 1 1] -.. GENERATED FROM PYTHON SOURCE LINES 85-87 +.. GENERATED FROM PYTHON SOURCE LINES 82-84 The fourth way: :func:`wrap_as_onnx_mixin`: can be called before fitting the model. -.. GENERATED FROM PYTHON SOURCE LINES 87-96 +.. GENERATED FROM PYTHON SOURCE LINES 84-93 .. code-block:: default @@ -195,8 +186,6 @@ before fitting the model. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [1 1 1 1 1 0 0 0 0 0] @@ -204,14 +193,14 @@ before fitting the model. -.. GENERATED FROM PYTHON SOURCE LINES 97-101 +.. GENERATED FROM PYTHON SOURCE LINES 94-98 Pipeline and a custom object ++++++++++++++++++++++++++++ This is a simple scaler. -.. GENERATED FROM PYTHON SOURCE LINES 101-137 +.. GENERATED FROM PYTHON SOURCE LINES 98-134 .. code-block:: default @@ -258,11 +247,11 @@ This is a simple scaler. -.. GENERATED FROM PYTHON SOURCE LINES 138-139 +.. GENERATED FROM PYTHON SOURCE LINES 135-136 Way 1 -.. GENERATED FROM PYTHON SOURCE LINES 139-150 +.. GENERATED FROM PYTHON SOURCE LINES 136-147 .. code-block:: default @@ -283,8 +272,6 @@ Way 1 .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [1 1 1 1 1 0 0 0 0 0] @@ -292,11 +279,11 @@ Way 1 -.. GENERATED FROM PYTHON SOURCE LINES 151-152 +.. GENERATED FROM PYTHON SOURCE LINES 148-149 Way 2 -.. GENERATED FROM PYTHON SOURCE LINES 152-160 +.. GENERATED FROM PYTHON SOURCE LINES 149-157 .. code-block:: default @@ -314,8 +301,6 @@ Way 2 .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [1 1 1 1 1 0 0 0 0 0] @@ -323,11 +308,11 @@ Way 2 -.. GENERATED FROM PYTHON SOURCE LINES 161-162 +.. GENERATED FROM PYTHON SOURCE LINES 158-159 Way 3 -.. GENERATED FROM PYTHON SOURCE LINES 162-172 +.. GENERATED FROM PYTHON SOURCE LINES 159-169 .. code-block:: default @@ -347,8 +332,6 @@ Way 3 .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [1 1 1 1 1 0 0 0 0 0] @@ -356,11 +339,11 @@ Way 3 -.. GENERATED FROM PYTHON SOURCE LINES 173-174 +.. GENERATED FROM PYTHON SOURCE LINES 170-171 Way 4 -.. GENERATED FROM PYTHON SOURCE LINES 174-185 +.. GENERATED FROM PYTHON SOURCE LINES 171-182 .. 
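The "second way" mentioned above, ``to_onnx``, infers the input type from a sample instead of requiring an explicit ``FloatTensorType``. A minimal sketch with toy data (the array is illustrative):

.. code-block:: python

    # Sketch of the to_onnx shortcut: the input type is inferred from the sample.
    import numpy as np
    from sklearn.cluster import KMeans
    from skl2onnx import to_onnx

    X = np.arange(20, dtype=np.float32).reshape(10, 2)   # illustrative toy data
    km = KMeans(n_clusters=2, n_init=10).fit(X)
    onx = to_onnx(km, X[:1])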
code-block:: default @@ -381,23 +364,21 @@ Way 4 .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [0 0 0 0 0 1 1 1 1 1] + [1 1 1 1 1 0 0 0 0 0] -.. GENERATED FROM PYTHON SOURCE LINES 186-190 +.. GENERATED FROM PYTHON SOURCE LINES 183-187 Display the ONNX graph ++++++++++++++++++++++ Finally, let's see the graph converted with *sklearn-onnx*. -.. GENERATED FROM PYTHON SOURCE LINES 190-207 +.. GENERATED FROM PYTHON SOURCE LINES 187-204 .. code-block:: default @@ -429,20 +410,18 @@ Finally, let's see the graph converted with *sklearn-onnx*. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 2595.5, 6900.5, -0.5) + (-0.5, 3103.5, 6900.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 208-209 +.. GENERATED FROM PYTHON SOURCE LINES 205-206 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 209-217 +.. GENERATED FROM PYTHON SOURCE LINES 206-214 .. code-block:: default @@ -460,15 +439,13 @@ Finally, let's see the graph converted with *sklearn-onnx*. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -476,35 +453,23 @@ Finally, let's see the graph converted with *sklearn-onnx*. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 2.972 seconds) + **Total running time of the script:** ( 0 minutes 3.632 seconds) .. _sphx_glr_download_auto_examples_plot_convert_syntax.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_convert_syntax.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_convert_syntax.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_convert_syntax.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_convert_syntax.ipynb ` + :download:`Download Jupyter notebook: plot_convert_syntax.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_convert_zipmap.rst.txt b/_sources/auto_examples/plot_convert_zipmap.rst.txt index a4e2429b3..7523cdd11 100644 --- a/_sources/auto_examples/plot_convert_zipmap.rst.txt +++ b/_sources/auto_examples/plot_convert_zipmap.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_convert_zipmap.py" +.. "auto_examples/plot_convert_zipmap.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -31,13 +31,10 @@ This conversion increases the prediction time and is not always needed. Let's see how to deactivate this behaviour on the Iris example. -.. contents:: - :local: - Train a model and convert it ++++++++++++++++++++++++++++ -.. 
GENERATED FROM PYTHON SOURCE LINES 25-48 +.. GENERATED FROM PYTHON SOURCE LINES 22-45 .. code-block:: default @@ -70,8 +67,6 @@ Train a model and convert it .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none LogisticRegression(max_iter=500) @@ -79,7 +74,7 @@ Train a model and convert it -.. GENERATED FROM PYTHON SOURCE LINES 49-54 +.. GENERATED FROM PYTHON SOURCE LINES 46-51 Output type +++++++++++ @@ -87,7 +82,7 @@ Output type Let's confirm the output type of the probabilities is a list of dictionaries with onnxruntime. -.. GENERATED FROM PYTHON SOURCE LINES 54-61 +.. GENERATED FROM PYTHON SOURCE LINES 51-58 .. code-block:: default @@ -104,25 +99,23 @@ is a list of dictionaries with onnxruntime. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [{0: 1.8009059488122148e-07, 1: 0.007128315046429634, 2: 0.9928714632987976}, {0: 0.9786841869354248, 1: 0.021315651014447212, 2: 1.8146978675304126e-07}] + [{0: 0.9724591970443726, 1: 0.027540581300854683, 2: 2.352435330976732e-07}, {0: 0.9776268601417542, 1: 0.022372985258698463, 2: 1.9038986920349998e-07}] probabilities type: type for the first observations: -.. GENERATED FROM PYTHON SOURCE LINES 62-66 +.. GENERATED FROM PYTHON SOURCE LINES 59-63 Without ZipMap ++++++++++++++ Let's remove the ZipMap operator. -.. GENERATED FROM PYTHON SOURCE LINES 66-78 +.. GENERATED FROM PYTHON SOURCE LINES 63-75 .. code-block:: default @@ -144,19 +137,17 @@ Let's remove the ZipMap operator. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [[1.8009059e-07 7.1283150e-03 9.9287146e-01] - [9.7868419e-01 2.1315651e-02 1.8146979e-07]] + [[9.7245920e-01 2.7540581e-02 2.3524353e-07] + [9.7762686e-01 2.2372985e-02 1.9038987e-07]] probabilities type: type for the first observations: -.. GENERATED FROM PYTHON SOURCE LINES 79-85 +.. GENERATED FROM PYTHON SOURCE LINES 76-82 One output per class ++++++++++++++++++++ @@ -165,7 +156,7 @@ This options removes the final operator ZipMap and splits the probabilities into columns. The final model produces one output for the label, and one output per class. -.. GENERATED FROM PYTHON SOURCE LINES 85-97 +.. GENERATED FROM PYTHON SOURCE LINES 82-94 .. code-block:: default @@ -187,24 +178,22 @@ one output for the label, and one output per class. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - output: 'output_label' shape=(38,) values=[2 0]... - output: 'i0' shape=(38,) values=[1.800906e-07 9.786842e-01]... - output: 'i1' shape=(38,) values=[0.00712832 0.02131565]... - output: 'i2' shape=(38,) values=[9.9287146e-01 1.8146979e-07]... + output: 'output_label' shape=(38,) values=[0 0]... + output: 'i0' shape=(38,) values=[0.9724592 0.97762686]... + output: 'i1' shape=(38,) values=[0.02754058 0.02237299]... + output: 'i2' shape=(38,) values=[2.3524353e-07 1.9038987e-07]... -.. GENERATED FROM PYTHON SOURCE LINES 98-100 +.. GENERATED FROM PYTHON SOURCE LINES 95-97 Let's compare prediction time +++++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 100-122 +.. GENERATED FROM PYTHON SOURCE LINES 97-119 .. code-block:: default @@ -236,25 +225,23 @@ Let's compare prediction time .. rst-class:: sphx-glr-script-out - Out: - .. 
code-block:: none Time with ZipMap: - [0.005292300000000694, 0.004656499999999397, 0.005002999999998536, 0.004417500000002406, 0.004069700000002285, 0.003911200000001003, 0.004106600000000071, 0.004177900000001955, 0.0040183999999996445, 0.0039039000000009594] + [0.012372604999882242, 0.004819102000055864, 0.002799700999958077, 0.005126302000007854, 0.004604101999802879, 0.0027419010000357957, 0.0030426010000610404, 0.0026854010000079143, 0.0026163009999891074, 0.0026090009998824826] Time without ZipMap: - [0.0016521999999987713, 0.0016895000000012317, 0.0016119000000003325, 0.00159049999999894, 0.0016048000000026263, 0.0016874000000015599, 0.001752899999999613, 0.0016172000000018727, 0.0015882999999980996, 0.001588700000002774] + [0.0016827009999360598, 0.0011974000001373497, 0.0012046009999266971, 0.0011644000001069799, 0.0011607010001171147, 0.0012080000001333246, 0.001181700999950408, 0.0011657999998533342, 0.0013147009999556758, 0.0015800999999555643] Time without ZipMap but with columns: - [0.003002200000000954, 0.002960300000001581, 0.0030136999999967884, 0.0029430000000019163, 0.0028410999999977093, 0.0028338999999988346, 0.003849699999999956, 0.003873699999999758, 0.003992600000000124, 0.004027000000000669] + [0.00204040099993108, 0.0020714009999664995, 0.0020063009999375936, 0.001988400999834994, 0.0019787999999607564, 0.0019756009999127855, 0.0021800010001697956, 0.002363000999821452, 0.0021929009999439586, 0.002378501000066535] -.. GENERATED FROM PYTHON SOURCE LINES 123-124 +.. GENERATED FROM PYTHON SOURCE LINES 120-121 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 124-130 +.. GENERATED FROM PYTHON SOURCE LINES 121-127 .. code-block:: default @@ -270,15 +257,13 @@ Let's compare prediction time .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -286,35 +271,23 @@ Let's compare prediction time .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.195 seconds) + **Total running time of the script:** ( 0 minutes 0.153 seconds) .. _sphx_glr_download_auto_examples_plot_convert_zipmap.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_convert_zipmap.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_convert_zipmap.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_convert_zipmap.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_convert_zipmap.ipynb ` + :download:`Download Jupyter notebook: plot_convert_zipmap.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_custom_model.rst.txt b/_sources/auto_examples/plot_custom_model.rst.txt index ffba4eb61..436f4f3ab 100644 --- a/_sources/auto_examples/plot_custom_model.rst.txt +++ b/_sources/auto_examples/plot_custom_model.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. 
THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_custom_model.py" +.. "auto_examples/plot_custom_model.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -39,10 +39,6 @@ generated/sklearn.manifold.TSNE.html#sklearn.manifold.TSNE.fit_transform>`_. This example proposes a way to train a machine learned model which approximates the outputs of a *t-SNE* transformer. - -.. contents:: - :local: - Implementation of the new transform +++++++++++++++++++++++++++++++++++ @@ -60,7 +56,7 @@ And to predict on a test set: * k nearest neightbours, :math:`f(X') \rightarrow X'_3` * final normalization, simple scaling :math:`X'_3 \rightarrow X'_4` -.. GENERATED FROM PYTHON SOURCE LINES 47-210 +.. GENERATED FROM PYTHON SOURCE LINES 43-206 .. code-block:: default @@ -234,14 +230,14 @@ And to predict on a test set: -.. GENERATED FROM PYTHON SOURCE LINES 211-215 +.. GENERATED FROM PYTHON SOURCE LINES 207-211 Experimentation on MNIST ++++++++++++++++++++++++ Let's fit t-SNE... -.. GENERATED FROM PYTHON SOURCE LINES 215-264 +.. GENERATED FROM PYTHON SOURCE LINES 211-260 .. code-block:: default @@ -305,24 +301,22 @@ Let's fit t-SNE... .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - array([, - ], dtype=object) + array([, + ], dtype=object) -.. GENERATED FROM PYTHON SOURCE LINES 265-269 +.. GENERATED FROM PYTHON SOURCE LINES 261-265 Repeatable t-SNE ++++++++++++++++ Just to check it is working. -.. GENERATED FROM PYTHON SOURCE LINES 269-278 +.. GENERATED FROM PYTHON SOURCE LINES 265-274 .. code-block:: default @@ -346,21 +340,19 @@ Just to check it is working. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - array([, - ], dtype=object) + array([, + ], dtype=object) -.. GENERATED FROM PYTHON SOURCE LINES 279-280 +.. GENERATED FROM PYTHON SOURCE LINES 275-276 We check on test set. -.. GENERATED FROM PYTHON SOURCE LINES 280-287 +.. GENERATED FROM PYTHON SOURCE LINES 276-283 .. code-block:: default @@ -382,17 +374,15 @@ We check on test set. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - array([, - ], dtype=object) + array([, + ], dtype=object) -.. GENERATED FROM PYTHON SOURCE LINES 288-296 +.. GENERATED FROM PYTHON SOURCE LINES 284-292 ONNX - shape_calculator, converter ++++++++++++++++++++++++++++++++++ @@ -403,7 +393,7 @@ one to calculate the shape of the outputs based on the inputs, the other one to do the actual conversion of the model. -.. GENERATED FROM PYTHON SOURCE LINES 296-311 +.. GENERATED FROM PYTHON SOURCE LINES 292-307 .. code-block:: default @@ -429,12 +419,12 @@ conversion of the model. -.. GENERATED FROM PYTHON SOURCE LINES 312-314 +.. GENERATED FROM PYTHON SOURCE LINES 308-310 Then the converter model. We reuse existing converter. -.. GENERATED FROM PYTHON SOURCE LINES 314-358 +.. GENERATED FROM PYTHON SOURCE LINES 310-354 .. code-block:: default @@ -489,11 +479,11 @@ reuse existing converter. -.. GENERATED FROM PYTHON SOURCE LINES 359-360 +.. GENERATED FROM PYTHON SOURCE LINES 355-356 We now need to declare the new converter. -.. GENERATED FROM PYTHON SOURCE LINES 360-366 +.. GENERATED FROM PYTHON SOURCE LINES 356-362 .. code-block:: default @@ -510,7 +500,7 @@ We now need to declare the new converter. -.. 
GENERATED FROM PYTHON SOURCE LINES 367-372 +.. GENERATED FROM PYTHON SOURCE LINES 363-368 Conversion to ONNX ++++++++++++++++++ @@ -518,7 +508,7 @@ Conversion to ONNX We just need to call *convert_sklearn* as any other model to convert. -.. GENERATED FROM PYTHON SOURCE LINES 372-382 +.. GENERATED FROM PYTHON SOURCE LINES 368-378 .. code-block:: default @@ -539,11 +529,11 @@ to convert. -.. GENERATED FROM PYTHON SOURCE LINES 383-384 +.. GENERATED FROM PYTHON SOURCE LINES 379-380 We now compare the prediction. -.. GENERATED FROM PYTHON SOURCE LINES 384-387 +.. GENERATED FROM PYTHON SOURCE LINES 380-383 .. code-block:: default @@ -556,22 +546,20 @@ We now compare the prediction. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none ptsne_knn.tranform - [[-1.1236873 -0.2753877 ] - [-1.3062997 -0.24523106]] + [[ 1.1081381 0.09305604] + [-0.05512788 -0.40800577]] -.. GENERATED FROM PYTHON SOURCE LINES 388-389 +.. GENERATED FROM PYTHON SOURCE LINES 384-385 Predictions with onnxruntime. -.. GENERATED FROM PYTHON SOURCE LINES 389-395 +.. GENERATED FROM PYTHON SOURCE LINES 385-391 .. code-block:: default @@ -587,22 +575,20 @@ Predictions with onnxruntime. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - transform [[-1.1236873 -0.2753877]] + transform [[1.1081381 0.09305604]] -.. GENERATED FROM PYTHON SOURCE LINES 396-399 +.. GENERATED FROM PYTHON SOURCE LINES 392-395 The converter for the nearest neighbours produces an ONNX graph which does not allow multiple predictions at a time. Let's call *onnxruntime* for the second row. -.. GENERATED FROM PYTHON SOURCE LINES 399-403 +.. GENERATED FROM PYTHON SOURCE LINES 395-399 .. code-block:: default @@ -616,21 +602,19 @@ which does not allow multiple predictions at a time. Let's call .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - transform [[-1.3062997 -0.24523106]] + transform [[-0.05512788 -0.40800577]] -.. GENERATED FROM PYTHON SOURCE LINES 404-406 +.. GENERATED FROM PYTHON SOURCE LINES 400-402 Display the ONNX graph ++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 406-420 +.. GENERATED FROM PYTHON SOURCE LINES 402-416 .. code-block:: default @@ -659,20 +643,18 @@ Display the ONNX graph .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 2071.5, 9099.5, -0.5) + (-0.5, 2643.5, 9099.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 421-422 +.. GENERATED FROM PYTHON SOURCE LINES 417-418 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 422-428 +.. GENERATED FROM PYTHON SOURCE LINES 418-424 .. code-block:: default @@ -688,15 +670,13 @@ Display the ONNX graph .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -704,35 +684,23 @@ Display the ONNX graph .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 8.397 seconds) + **Total running time of the script:** ( 0 minutes 25.419 seconds) .. _sphx_glr_download_auto_examples_plot_custom_model.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_custom_model.ipynb - :alt: Launch binder - :width: 150 px - - - .. 
container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_custom_model.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_custom_model.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_custom_model.ipynb ` + :download:`Download Jupyter notebook: plot_custom_model.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_custom_parser.rst.txt b/_sources/auto_examples/plot_custom_parser.rst.txt index 80d6f7b75..ae924dcd3 100644 --- a/_sources/auto_examples/plot_custom_parser.rst.txt +++ b/_sources/auto_examples/plot_custom_parser.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_custom_parser.py" +.. "auto_examples/plot_custom_parser.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -30,16 +30,13 @@ to add a third result which tells if the probability is above a given threshold. That's implemented in method *validate*. -.. contents:: - :local: - Iris and scoring ++++++++++++++++ A new class is created, it trains any classifier and implements the method *validate* mentioned above. -.. GENERATED FROM PYTHON SOURCE LINES 26-87 +.. GENERATED FROM PYTHON SOURCE LINES 23-84 .. code-block:: default @@ -117,13 +114,13 @@ the method *validate* mentioned above.

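The *validate* output added by this example is, conceptually, just a thresholded maximum probability. A minimal self-contained sketch of that idea follows; the class and parameter names below are illustrative, not the ones defined by the gallery script.

.. code-block:: python

    # Minimal sketch of a classifier exposing an extra "validate" output,
    # assuming the same idea as the example: 1 when the top probability
    # exceeds a threshold, 0 otherwise. Names are illustrative only.
    import numpy as np
    from sklearn.base import BaseEstimator, ClassifierMixin, clone
    from sklearn.linear_model import LogisticRegression


    class ThresholdValidatorClassifier(BaseEstimator, ClassifierMixin):
        def __init__(self, estimator=None, threshold=0.75):
            self.estimator = estimator
            self.threshold = threshold

        def fit(self, X, y):
            est = (self.estimator if self.estimator is not None
                   else LogisticRegression(max_iter=500))
            self.estimator_ = clone(est).fit(X, y)
            self.classes_ = self.estimator_.classes_
            return self

        def predict(self, X):
            return self.estimator_.predict(X)

        def predict_proba(self, X):
            return self.estimator_.predict_proba(X)

        def validate(self, X):
            # 1 when the most probable class is above the threshold.
            proba = self.predict_proba(X)
            return (proba.max(axis=1) >= self.threshold).astype(np.int64)

Because such a model returns three results instead of the two skl2onnx expects from a classifier, its conversion needs the custom shape calculator, converter and parser registered in the rest of this example.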
-.. GENERATED FROM PYTHON SOURCE LINES 88-91 +.. GENERATED FROM PYTHON SOURCE LINES 85-88 Let's now measure the indicator which tells if the probability of a prediction is above a threshold. -.. GENERATED FROM PYTHON SOURCE LINES 91-94 +.. GENERATED FROM PYTHON SOURCE LINES 88-91 .. code-block:: default @@ -136,17 +133,15 @@ a threshold. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [0 1 0 0 1 1 0 1 1 1 0 0 0 1 0 0 0 0 1 1 0 1 1 0 1 1 1 0 0 1 1 1 0 1 0 0 1 + [1 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 1 1 1 1 1 1 0 1 1 1 1 0 1 0 0 1 1 1] -.. GENERATED FROM PYTHON SOURCE LINES 95-101 +.. GENERATED FROM PYTHON SOURCE LINES 92-98 Conversion to ONNX +++++++++++++++++++ @@ -155,7 +150,7 @@ The conversion fails for a new model because the library does not know any converter associated to this new model. -.. GENERATED FROM PYTHON SOURCE LINES 101-108 +.. GENERATED FROM PYTHON SOURCE LINES 98-105 .. code-block:: default @@ -172,8 +167,6 @@ to this new model. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Unable to find a shape calculator for type ''. @@ -193,7 +186,7 @@ to this new model. -.. GENERATED FROM PYTHON SOURCE LINES 109-115 +.. GENERATED FROM PYTHON SOURCE LINES 106-112 Custom converter ++++++++++++++++ @@ -202,7 +195,7 @@ We reuse some pieces of code from :ref:`l-custom-model`. The shape calculator defines the shape of every output of the converted model. -.. GENERATED FROM PYTHON SOURCE LINES 115-132 +.. GENERATED FROM PYTHON SOURCE LINES 112-129 .. code-block:: default @@ -230,11 +223,11 @@ of the converted model. -.. GENERATED FROM PYTHON SOURCE LINES 133-134 +.. GENERATED FROM PYTHON SOURCE LINES 130-131 Then the converter. -.. GENERATED FROM PYTHON SOURCE LINES 134-179 +.. GENERATED FROM PYTHON SOURCE LINES 131-186 .. code-block:: default @@ -263,9 +256,19 @@ Then the converter. # We now handle the validation. val_max = scope.get_unique_variable_name('val_max') - container.add_node('ReduceMax', val_prob.full_name, val_max, - name=scope.get_unique_operator_name('ReduceMax'), - axes=[1], keepdims=0) + if container.target_opset >= 18: + axis_name = scope.get_unique_variable_name('axis') + container.add_initializer( + axis_name, onnx_proto.TensorProto.INT64, [1], [1]) + container.add_node( + 'ReduceMax', [val_prob.full_name, axis_name], val_max, + name=scope.get_unique_operator_name('ReduceMax'), + keepdims=0) + else: + container.add_node( + 'ReduceMax', val_prob.full_name, val_max, + name=scope.get_unique_operator_name('ReduceMax'), + axes=[1], keepdims=0) th_name = scope.get_unique_variable_name('threshold') container.add_initializer( @@ -290,11 +293,11 @@ Then the converter. -.. GENERATED FROM PYTHON SOURCE LINES 180-181 +.. GENERATED FROM PYTHON SOURCE LINES 187-188 Then the registration. -.. GENERATED FROM PYTHON SOURCE LINES 181-187 +.. GENERATED FROM PYTHON SOURCE LINES 188-194 .. code-block:: default @@ -311,11 +314,11 @@ Then the registration. -.. GENERATED FROM PYTHON SOURCE LINES 188-189 +.. GENERATED FROM PYTHON SOURCE LINES 195-196 And conversion... -.. GENERATED FROM PYTHON SOURCE LINES 189-196 +.. GENERATED FROM PYTHON SOURCE LINES 196-203 .. code-block:: default @@ -332,8 +335,6 @@ And conversion... .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none 3 outputs expected not 2. @@ -341,7 +342,7 @@ And conversion... -.. GENERATED FROM PYTHON SOURCE LINES 197-204 +.. 
GENERATED FROM PYTHON SOURCE LINES 204-211 It fails because the library expected the model to behave like a classifier which produces two @@ -351,7 +352,7 @@ tell the library this model produces three outputs. Custom parser +++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 204-224 +.. GENERATED FROM PYTHON SOURCE LINES 211-231 .. code-block:: default @@ -382,11 +383,11 @@ Custom parser -.. GENERATED FROM PYTHON SOURCE LINES 225-226 +.. GENERATED FROM PYTHON SOURCE LINES 232-233 Registration. -.. GENERATED FROM PYTHON SOURCE LINES 226-233 +.. GENERATED FROM PYTHON SOURCE LINES 233-240 .. code-block:: default @@ -404,11 +405,11 @@ Registration. -.. GENERATED FROM PYTHON SOURCE LINES 234-235 +.. GENERATED FROM PYTHON SOURCE LINES 241-242 And conversion again. -.. GENERATED FROM PYTHON SOURCE LINES 235-239 +.. GENERATED FROM PYTHON SOURCE LINES 242-246 .. code-block:: default @@ -423,14 +424,14 @@ And conversion again. -.. GENERATED FROM PYTHON SOURCE LINES 240-244 +.. GENERATED FROM PYTHON SOURCE LINES 247-251 Final test ++++++++++ We need now to check the results are the same with ONNX. -.. GENERATED FROM PYTHON SOURCE LINES 244-260 +.. GENERATED FROM PYTHON SOURCE LINES 251-267 .. code-block:: default @@ -456,39 +457,37 @@ We need now to check the results are the same with ONNX. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none --labels-- - sklearn [2 0 2 2 1] - onnx [2 0 2 2 1] + sklearn [0 0 0 2 2] + onnx [0 0 0 2 2] --probabilities-- - sklearn [[3.71357231e-04 2.96586450e-01 7.03042193e-01] - [8.65065673e-01 1.34848325e-01 8.60018083e-05] - [1.17898120e-03 3.65979433e-01 6.32841586e-01] - [6.79752603e-03 3.68582685e-01 6.24619789e-01] - [5.02545438e-02 7.88441754e-01 1.61303702e-01]] - onnx [[3.7135908e-04 2.9658648e-01 7.0304215e-01] - [8.6506563e-01 1.3484833e-01 8.5985652e-05] - [1.1789729e-03 3.6597937e-01 6.3284159e-01] - [6.7974939e-03 3.6858279e-01 6.2461972e-01] - [5.0254531e-02 7.8844190e-01 1.6130358e-01]] + sklearn [[8.15388734e-01 1.84445827e-01 1.65438648e-04] + [8.62653409e-01 1.37139236e-01 2.07354284e-04] + [9.04537964e-01 9.53809626e-02 8.10733209e-05] + [4.98573169e-02 3.25151760e-01 6.24990923e-01] + [3.20811905e-03 3.87065374e-01 6.09726507e-01]] + onnx [[8.1538868e-01 1.8444584e-01 1.6544168e-04] + [8.6265337e-01 1.3713925e-01 2.0727392e-04] + [9.0453798e-01 9.5380947e-02 8.1110506e-05] + [4.9857341e-02 3.2515183e-01 6.2499082e-01] + [3.2081197e-03 3.8706535e-01 6.0972649e-01]] --validation-- - sklearn [0 1 0 0 1] - onnx [0 1 0 0 1] + sklearn [1 1 1 0 0] + onnx [1 1 1 0 0] -.. GENERATED FROM PYTHON SOURCE LINES 261-265 +.. GENERATED FROM PYTHON SOURCE LINES 268-272 It looks good. Display the ONNX graph ++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 265-279 +.. GENERATED FROM PYTHON SOURCE LINES 272-286 .. code-block:: default @@ -517,20 +516,18 @@ Display the ONNX graph .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 2513.5, 4934.5, -0.5) + (-0.5, 3160.5, 4934.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 280-281 +.. GENERATED FROM PYTHON SOURCE LINES 287-288 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 281-287 +.. GENERATED FROM PYTHON SOURCE LINES 288-294 .. code-block:: default @@ -546,15 +543,13 @@ Display the ONNX graph .. rst-class:: sphx-glr-script-out - Out: - .. 
code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -562,35 +557,23 @@ Display the ONNX graph .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 2.021 seconds) + **Total running time of the script:** ( 0 minutes 2.623 seconds) .. _sphx_glr_download_auto_examples_plot_custom_parser.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_custom_parser.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_custom_parser.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_custom_parser.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_custom_parser.ipynb ` + :download:`Download Jupyter notebook: plot_custom_parser.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_custom_parser_alternative.rst.txt b/_sources/auto_examples/plot_custom_parser_alternative.rst.txt index 75e6a1ee7..2c4623bd9 100644 --- a/_sources/auto_examples/plot_custom_parser_alternative.rst.txt +++ b/_sources/auto_examples/plot_custom_parser_alternative.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_custom_parser_alternative.py" +.. "auto_examples/plot_custom_parser_alternative.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -35,16 +35,13 @@ to add a third result which tells if the probability is above a given threshold. That's implemented in method *validate*. -.. contents:: - :local: - Iris and scoring ++++++++++++++++ A new class is created, it trains any classifier and implements the method *validate* mentioned above. -.. GENERATED FROM PYTHON SOURCE LINES 31-92 +.. GENERATED FROM PYTHON SOURCE LINES 28-89 .. code-block:: default @@ -65,7 +62,7 @@ the method *validate* mentioned above. from skl2onnx.proto import onnx_proto from skl2onnx.common.data_types import FloatTensorType, Int64TensorType from skl2onnx.algebra.onnx_ops import ( - OnnxGreater, OnnxCast, OnnxReduceMax, OnnxIdentity + OnnxGreater, OnnxCast, OnnxReduceMaxApi18, OnnxIdentity ) from skl2onnx.algebra.onnx_operator import OnnxSubEstimator import matplotlib.pyplot as plt @@ -122,13 +119,13 @@ the method *validate* mentioned above.

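This variant builds the extra output with the algebra API, and the hunks in this file replace ``OnnxReduceMax`` with ``OnnxReduceMaxApi18``. The reason is the ONNX opset 18 change that moved the *axes* of ``ReduceMax`` from a node attribute to a second input. A rough sketch of the two layouts is shown below; the tensor names are made up for illustration.

.. code-block:: python

    # Sketch of the ReduceMax signature change behind these edits; the tensor
    # names ("probabilities", "axes", "max_prob") are illustrative only.
    import numpy as np
    from onnx import helper, numpy_helper

    # Before opset 18: axes passed as a node attribute.
    node_old = helper.make_node(
        "ReduceMax", inputs=["probabilities"], outputs=["max_prob"],
        axes=[1], keepdims=0)

    # From opset 18 on: axes passed as a second int64 input, typically an
    # initializer added to the graph.
    axes_init = numpy_helper.from_array(
        np.array([1], dtype=np.int64), name="axes")
    node_new = helper.make_node(
        "ReduceMax", inputs=["probabilities", "axes"], outputs=["max_prob"],
        keepdims=0)

``OnnxReduceMaxApi18`` (like the ``target_opset >= 18`` branch added in the previous example) is meant to select the appropriate form for the opset the model is converted to.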
-.. GENERATED FROM PYTHON SOURCE LINES 93-96 +.. GENERATED FROM PYTHON SOURCE LINES 90-93 Let's now measure the indicator which tells if the probability of a prediction is above a threshold. -.. GENERATED FROM PYTHON SOURCE LINES 96-99 +.. GENERATED FROM PYTHON SOURCE LINES 93-96 .. code-block:: default @@ -141,17 +138,15 @@ a threshold. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [1 1 0 0 1 1 1 1 1 0 0 1 1 1 0 1 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 1 0 - 0] + [1 1 1 0 1 0 0 0 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 0 0 0 0 0 1 0 1 0 1 0 + 1] -.. GENERATED FROM PYTHON SOURCE LINES 100-106 +.. GENERATED FROM PYTHON SOURCE LINES 97-103 Conversion to ONNX +++++++++++++++++++ @@ -160,7 +155,7 @@ The conversion fails for a new model because the library does not know any converter associated to this new model. -.. GENERATED FROM PYTHON SOURCE LINES 106-113 +.. GENERATED FROM PYTHON SOURCE LINES 103-110 .. code-block:: default @@ -177,8 +172,6 @@ to this new model. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Unable to find a shape calculator for type ''. @@ -198,7 +191,7 @@ to this new model. -.. GENERATED FROM PYTHON SOURCE LINES 114-120 +.. GENERATED FROM PYTHON SOURCE LINES 111-117 Custom converter ++++++++++++++++ @@ -207,7 +200,7 @@ We reuse some pieces of code from :ref:`l-custom-model`. The shape calculator defines the shape of every output of the converted model. -.. GENERATED FROM PYTHON SOURCE LINES 120-137 +.. GENERATED FROM PYTHON SOURCE LINES 117-134 .. code-block:: default @@ -235,11 +228,11 @@ of the converted model. -.. GENERATED FROM PYTHON SOURCE LINES 138-139 +.. GENERATED FROM PYTHON SOURCE LINES 135-136 Then the converter. -.. GENERATED FROM PYTHON SOURCE LINES 139-171 +.. GENERATED FROM PYTHON SOURCE LINES 136-168 .. code-block:: default @@ -257,7 +250,7 @@ Then the converter. onnx_op = OnnxSubEstimator(model, input0, op_version=opv, options={'zipmap': False}) - rmax = OnnxReduceMax(onnx_op[1], axes=[1], keepdims=0, op_version=opv) + rmax = OnnxReduceMaxApi18(onnx_op[1], axes=[1], keepdims=0, op_version=opv) great = OnnxGreater(rmax, np.array([op.threshold], dtype=np.float32), op_version=opv) valid = OnnxCast(great, to=onnx_proto.TensorProto.INT64, @@ -282,11 +275,11 @@ Then the converter. -.. GENERATED FROM PYTHON SOURCE LINES 172-173 +.. GENERATED FROM PYTHON SOURCE LINES 169-170 Then the registration. -.. GENERATED FROM PYTHON SOURCE LINES 173-179 +.. GENERATED FROM PYTHON SOURCE LINES 170-176 .. code-block:: default @@ -303,11 +296,11 @@ Then the registration. -.. GENERATED FROM PYTHON SOURCE LINES 180-181 +.. GENERATED FROM PYTHON SOURCE LINES 177-178 And conversion... -.. GENERATED FROM PYTHON SOURCE LINES 181-188 +.. GENERATED FROM PYTHON SOURCE LINES 178-185 .. code-block:: default @@ -324,8 +317,6 @@ And conversion... .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none 3 outputs expected not 2. @@ -333,7 +324,7 @@ And conversion... -.. GENERATED FROM PYTHON SOURCE LINES 189-196 +.. GENERATED FROM PYTHON SOURCE LINES 186-193 It fails because the library expected the model to behave like a classifier which produces two @@ -343,7 +334,7 @@ tell the library this model produces three outputs. Custom parser +++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 196-216 +.. GENERATED FROM PYTHON SOURCE LINES 193-213 .. code-block:: default @@ -374,11 +365,11 @@ Custom parser -.. GENERATED FROM PYTHON SOURCE LINES 217-218 +.. GENERATED FROM PYTHON SOURCE LINES 214-215 Registration. -.. 
GENERATED FROM PYTHON SOURCE LINES 218-225 +.. GENERATED FROM PYTHON SOURCE LINES 215-222 .. code-block:: default @@ -396,11 +387,11 @@ Registration. -.. GENERATED FROM PYTHON SOURCE LINES 226-227 +.. GENERATED FROM PYTHON SOURCE LINES 223-224 And conversion again. -.. GENERATED FROM PYTHON SOURCE LINES 227-231 +.. GENERATED FROM PYTHON SOURCE LINES 224-228 .. code-block:: default @@ -415,14 +406,14 @@ And conversion again. -.. GENERATED FROM PYTHON SOURCE LINES 232-236 +.. GENERATED FROM PYTHON SOURCE LINES 229-233 Final test ++++++++++ We need now to check the results are the same with ONNX. -.. GENERATED FROM PYTHON SOURCE LINES 236-252 +.. GENERATED FROM PYTHON SOURCE LINES 233-249 .. code-block:: default @@ -448,39 +439,37 @@ We need now to check the results are the same with ONNX. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none --labels-- - sklearn [0 0 1 2 0] - onnx [0 0 1 2 0] + sklearn [2 0 0 2 0] + onnx [2 0 0 2 0] --probabilities-- - sklearn [[8.41548436e-01 1.58418465e-01 3.30992953e-05] - [9.13732953e-01 8.62424261e-02 2.46206233e-05] - [6.68109438e-02 6.52596493e-01 2.80592563e-01] - [1.43365308e-03 4.17237275e-01 5.81329072e-01] - [9.08895793e-01 9.11019365e-02 2.27013292e-06]] - onnx [[8.4154850e-01 1.5841845e-01 3.3074029e-05] - [9.1373289e-01 8.6242430e-02 2.4681309e-05] - [6.6810913e-02 6.5259665e-01 2.8059244e-01] - [1.4336860e-03 4.1723728e-01 5.8132905e-01] - [9.0889579e-01 9.1101922e-02 2.2608199e-06]] + sklearn [[6.57906777e-04 1.91959240e-01 8.07382854e-01] + [8.72054450e-01 1.27893759e-01 5.17916588e-05] + [8.53658204e-01 1.46046803e-01 2.94993135e-04] + [2.73275979e-04 3.98725606e-01 6.01001118e-01] + [9.70430915e-01 2.95535348e-02 1.55504186e-05]] + onnx [[6.5787166e-04 1.9195931e-01 8.0738282e-01] + [8.7205452e-01 1.2789373e-01 5.1797400e-05] + [8.5365826e-01 1.4604677e-01 2.9501395e-04] + [2.7327205e-04 3.9872569e-01 6.0100102e-01] + [9.7043103e-01 2.9553531e-02 1.5552911e-05]] --validation-- - sklearn [1 1 0 0 1] - onnx [1 1 0 0 1] + sklearn [1 1 1 0 1] + onnx [1 1 1 0 1] -.. GENERATED FROM PYTHON SOURCE LINES 253-257 +.. GENERATED FROM PYTHON SOURCE LINES 250-254 It looks good. Display the ONNX graph ++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 257-271 +.. GENERATED FROM PYTHON SOURCE LINES 254-268 .. code-block:: default @@ -509,20 +498,18 @@ Display the ONNX graph .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 2903.5, 4934.5, -0.5) + (-0.5, 3414.5, 4934.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 272-273 +.. GENERATED FROM PYTHON SOURCE LINES 269-270 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 273-279 +.. GENERATED FROM PYTHON SOURCE LINES 270-276 .. code-block:: default @@ -538,15 +525,13 @@ Display the ONNX graph .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -554,35 +539,23 @@ Display the ONNX graph .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 2.296 seconds) + **Total running time of the script:** ( 0 minutes 2.812 seconds) .. _sphx_glr_download_auto_examples_plot_custom_parser_alternative.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. 
image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_custom_parser_alternative.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_custom_parser_alternative.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_custom_parser_alternative.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_custom_parser_alternative.ipynb ` + :download:`Download Jupyter notebook: plot_custom_parser_alternative.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_errors_onnxruntime.rst.txt b/_sources/auto_examples/plot_errors_onnxruntime.rst.txt index fb4e659fd..0c05f2afe 100644 --- a/_sources/auto_examples/plot_errors_onnxruntime.rst.txt +++ b/_sources/auto_examples/plot_errors_onnxruntime.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_errors_onnxruntime.py" +.. "auto_examples/plot_errors_onnxruntime.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -96,8 +96,6 @@ and cannot handle any other kind of floats. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Unexpected type @@ -129,8 +127,6 @@ is misspelled. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Misspelled output name @@ -160,8 +156,6 @@ and *onnxruntime* will then return all the outputs. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none All outputs @@ -192,8 +186,6 @@ The same goes if the input name is misspelled. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Misspelled input name @@ -243,8 +235,6 @@ dimension is a multiple of the expected input dimension. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Shape=(4,) and error=[ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Invalid rank for input: X Got: 1 Expected: 2 Please fix either the inputs or the model. @@ -295,8 +285,6 @@ is higher than expects but produces a warning. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Shape=(1, 2, 2) and error=[ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Invalid rank for input: X Got: 3 Expected: 2 Please fix either the inputs or the model. @@ -326,15 +314,13 @@ is higher than expects but produces a warning. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -342,35 +328,23 @@ is higher than expects but produces a warning. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.067 seconds) + **Total running time of the script:** ( 0 minutes 0.077 seconds) .. _sphx_glr_download_auto_examples_plot_errors_onnxruntime.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. 
container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_errors_onnxruntime.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_errors_onnxruntime.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_errors_onnxruntime.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_errors_onnxruntime.ipynb ` + :download:`Download Jupyter notebook: plot_errors_onnxruntime.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_gpr.rst.txt b/_sources/auto_examples/plot_gpr.rst.txt index f4779a5a7..b2ba3ecb6 100644 --- a/_sources/auto_examples/plot_gpr.rst.txt +++ b/_sources/auto_examples/plot_gpr.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_gpr.py" +.. "auto_examples/plot_gpr.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -31,23 +31,20 @@ precisions. *sklearn-onnx* is using single floats by default but for this particular model, it is better to use double. Let's see how to create an ONNX file using doubles. -.. contents:: - :local: - Train a model +++++++++++++ A very basic example using *GaussianProcessRegressor* on the Boston dataset. -.. GENERATED FROM PYTHON SOURCE LINES 27-47 +.. GENERATED FROM PYTHON SOURCE LINES 24-44 .. code-block:: default import pprint import numpy import sklearn - from sklearn.datasets import load_boston + from sklearn.datasets import load_diabetes from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import DotProduct, RBF from sklearn.model_selection import train_test_split @@ -57,8 +54,8 @@ on the Boston dataset. from skl2onnx.common.data_types import FloatTensorType, DoubleTensorType from skl2onnx import convert_sklearn - bost = load_boston() - X, y = bost.data, bost.target + dataset = load_diabetes() + X, y = dataset.data, dataset.target X_train, X_test, y_train, y_test = train_test_split(X, y) gpr = GaussianProcessRegressor(DotProduct() + RBF(), alpha=1.) gpr.fit(X_train, y_train) @@ -70,17 +67,19 @@ on the Boston dataset. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none + /home/xadupre/github/scikit-learn/sklearn/gaussian_process/kernels.py:430: ConvergenceWarning: The optimal value found for dimension 0 of parameter k1__sigma_0 is close to the specified upper bound 100000.0. Increasing the bound and calling fit again may find a better value. + warnings.warn( + /home/xadupre/github/scikit-learn/sklearn/gaussian_process/kernels.py:420: ConvergenceWarning: The optimal value found for dimension 0 of parameter k2__length_scale is close to the specified lower bound 1e-05. Decreasing the bound and calling fit again may find a better value. + warnings.warn( GaussianProcessRegressor(alpha=1.0, kernel=DotProduct(sigma_0=1) + RBF(length_scale=1)) -.. 
GENERATED FROM PYTHON SOURCE LINES 48-53 +.. GENERATED FROM PYTHON SOURCE LINES 45-50 First attempt to convert a model into ONNX ++++++++++++++++++++++++++++++++++++++++++ @@ -88,7 +87,7 @@ First attempt to convert a model into ONNX The documentation suggests the following way to convert a model into ONNX. -.. GENERATED FROM PYTHON SOURCE LINES 53-65 +.. GENERATED FROM PYTHON SOURCE LINES 50-62 .. code-block:: default @@ -111,7 +110,7 @@ convert a model into ONNX. -.. GENERATED FROM PYTHON SOURCE LINES 66-78 +.. GENERATED FROM PYTHON SOURCE LINES 63-75 Second attempt: variable dimensions +++++++++++++++++++++++++++++++++++ @@ -126,7 +125,7 @@ We need to disable these checkings by replacing the fixed dimensions by an empty value. (see next line). -.. GENERATED FROM PYTHON SOURCE LINES 78-91 +.. GENERATED FROM PYTHON SOURCE LINES 75-88 .. code-block:: default @@ -149,24 +148,22 @@ the fixed dimensions by an empty value. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [14.91271822 9.65190241 10.75051854 47.18150916 14.42070831 22.22280927 - 25.9103425 12.4559437 14.0142188 25.53928326] - [14.625] + [155.86157227 161.37359619 169.98284912 163.67077637 179.60345459 + 112.92480469 140.47344971 107.94818115 131.79040527 178.3425293 ] + [270336.] -.. GENERATED FROM PYTHON SOURCE LINES 92-95 +.. GENERATED FROM PYTHON SOURCE LINES 89-92 The differences seems quite important. Let's confirm that by looking at the biggest differences. -.. GENERATED FROM PYTHON SOURCE LINES 95-101 +.. GENERATED FROM PYTHON SOURCE LINES 92-98 .. code-block:: default @@ -182,17 +179,16 @@ differences. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [4.52496092 4.70112771 4.8192842 4.98651983 5.57457671] - min(Y)-max(Y): 5.6 50.0 + [270230.27374268 270231.13311768 270231.22369385 270233.97131348 + 270236.3505249 ] + min(Y)-max(Y): 42.0 346.0 -.. GENERATED FROM PYTHON SOURCE LINES 102-118 +.. GENERATED FROM PYTHON SOURCE LINES 99-115 Third attempt: use of double ++++++++++++++++++++++++++++ @@ -211,7 +207,7 @@ tells the conversion function that every real constant matrix such as the trained coefficients will be dumped as doubles and not as floats anymore. -.. GENERATED FROM PYTHON SOURCE LINES 118-128 +.. GENERATED FROM PYTHON SOURCE LINES 115-125 .. code-block:: default @@ -231,20 +227,18 @@ will be dumped as doubles and not as floats anymore. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [14.91271823] + [155.86221577] -.. GENERATED FROM PYTHON SOURCE LINES 129-130 +.. GENERATED FROM PYTHON SOURCE LINES 126-127 The new differences look much better. -.. GENERATED FROM PYTHON SOURCE LINES 130-136 +.. GENERATED FROM PYTHON SOURCE LINES 127-133 .. code-block:: default @@ -260,18 +254,15 @@ The new differences look much better. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [1.35111407e-08 1.40779797e-08 1.42057797e-08 1.46704888e-08 - 1.64140879e-08] - min(Y)-max(Y): 5.6 50.0 + [0.0048411 0.00493731 0.00496565 0.00522617 0.00536676] + min(Y)-max(Y): 42.0 346.0 -.. GENERATED FROM PYTHON SOURCE LINES 137-143 +.. GENERATED FROM PYTHON SOURCE LINES 134-140 Size increase +++++++++++++ @@ -280,7 +271,7 @@ As a result, the ONNX model is almost twice bigger because every coefficient is stored as double and and not as floats anymore. -.. GENERATED FROM PYTHON SOURCE LINES 143-149 +.. GENERATED FROM PYTHON SOURCE LINES 140-146 .. code-block:: default @@ -296,17 +287,15 @@ and not as floats anymore. .. rst-class:: sphx-glr-script-out - Out: - .. 
code-block:: none - ONNX with floats: 42946 - ONNX with doubles: 83950 + ONNX with floats: 29814 + ONNX with doubles: 57694 -.. GENERATED FROM PYTHON SOURCE LINES 150-161 +.. GENERATED FROM PYTHON SOURCE LINES 147-158 return_std=True +++++++++++++++ @@ -320,7 +309,7 @@ The converter needs to know that an extended graph is required. That's done through the option mechanism (see :ref:`l-conv-options`). -.. GENERATED FROM PYTHON SOURCE LINES 161-170 +.. GENERATED FROM PYTHON SOURCE LINES 158-167 .. code-block:: default @@ -340,14 +329,14 @@ That's done through the option mechanism -.. GENERATED FROM PYTHON SOURCE LINES 171-175 +.. GENERATED FROM PYTHON SOURCE LINES 168-172 This error highlights the fact that the *scikit-learn* computes internal variables on first call to method predict. The converter needs them to be initialized by calling method predict at least once and then converting again. -.. GENERATED FROM PYTHON SOURCE LINES 175-185 +.. GENERATED FROM PYTHON SOURCE LINES 172-182 .. code-block:: default @@ -367,25 +356,24 @@ predict at least once and then converting again. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [array([[14.91271823], - [ 9.65190241], - [10.75051853], - [47.18150915], - [14.42070831]]), - array([1.03188297, 1.04084264, 0.99014271, 0.79255243, 1.07089793])] + [array([[155.86221577], + [161.37604364], + [169.97800802], + [163.66931185], + [179.59932504]]), + array([589.99334399, 0. , 0. , 502.71732585, + 0. ])] -.. GENERATED FROM PYTHON SOURCE LINES 186-187 +.. GENERATED FROM PYTHON SOURCE LINES 183-184 Let's compare with *scikit-learn* prediction. -.. GENERATED FROM PYTHON SOURCE LINES 187-190 +.. GENERATED FROM PYTHON SOURCE LINES 184-187 .. code-block:: default @@ -398,21 +386,20 @@ Let's compare with *scikit-learn* prediction. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (array([14.91271822, 9.65190241, 10.75051854, 47.18150916, 14.42070831]), - array([1.03168158, 1.04083401, 0.98876392, 0.79254442, 1.07066117])) + (array([155.86157227, 161.37359619, 169.98284912, 163.67077637, + 179.60174561]), + array([1.01325219, 1.00510439, 1.00620633, 1.0110021 , 1.00937738])) -.. GENERATED FROM PYTHON SOURCE LINES 191-192 +.. GENERATED FROM PYTHON SOURCE LINES 188-189 It looks good. Let's do a better checks. -.. GENERATED FROM PYTHON SOURCE LINES 192-202 +.. GENERATED FROM PYTHON SOURCE LINES 189-199 .. code-block:: default @@ -432,22 +419,20 @@ It looks good. Let's do a better checks. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [0.00129866 0.00137879 0.00138342 0.00150727 0.00181798] + [717.44499291 726.51668442 729.78735343 737.89521503 840.48277943] -.. GENERATED FROM PYTHON SOURCE LINES 203-206 +.. GENERATED FROM PYTHON SOURCE LINES 200-203 There are some discrepencies but it seems reasonable. **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 206-212 +.. GENERATED FROM PYTHON SOURCE LINES 203-209 .. code-block:: default @@ -463,15 +448,13 @@ There are some discrepencies but it seems reasonable. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -479,35 +462,23 @@ There are some discrepencies but it seems reasonable. .. 
rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 1.528 seconds) + **Total running time of the script:** ( 0 minutes 1.592 seconds) .. _sphx_glr_download_auto_examples_plot_gpr.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_gpr.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_gpr.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_gpr.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_gpr.ipynb ` + :download:`Download Jupyter notebook: plot_gpr.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_intermediate_outputs.rst.txt b/_sources/auto_examples/plot_intermediate_outputs.rst.txt index 54d44d16c..ef3e5674b 100644 --- a/_sources/auto_examples/plot_intermediate_outputs.rst.txt +++ b/_sources/auto_examples/plot_intermediate_outputs.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_intermediate_outputs.py" +.. "auto_examples/plot_intermediate_outputs.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -28,9 +28,6 @@ converter which is not correctly implemented. One option is to look into the output of every node of the ONNX graph. -.. contents:: - :local: - Create and train a complex pipeline +++++++++++++++++++++++++++++++++++ @@ -44,7 +41,7 @@ Operators-ml.md#ai.onnx.ml.Imputer>`_ does not handle string type. This cannot be part of the final ONNX pipeline and must be removed. Look for comment starting with ``---`` below. -.. GENERATED FROM PYTHON SOURCE LINES 31-92 +.. GENERATED FROM PYTHON SOURCE LINES 28-89 .. code-block:: default @@ -154,7 +151,7 @@ and must be removed. Look for comment starting with ``---`` below.

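A practical way to look at intermediate results is to keep only the part of the graph that ends at a chosen tensor and run that smaller model. The sketch below assumes a converted ``model_onnx`` and an ``inputs`` dictionary built as in this example, and assumes the ``skl2onnx.helpers.onnx_helper`` utilities (their exact signatures may vary between versions); ``variable1`` stands for one intermediate tensor name.

.. code-block:: python

    # Sketch only: enumerate the node outputs, truncate the graph after one
    # of them and run the truncated model with onnxruntime. Assumes
    # model_onnx and inputs already exist as in the example; "variable1"
    # is one intermediate tensor of the numerical sub-pipeline.
    import onnxruntime as rt
    from skl2onnx.helpers.onnx_helper import (
        enumerate_model_node_outputs, save_onnx_model,
        select_model_inputs_outputs)

    for name in enumerate_model_node_outputs(model_onnx):
        print(name)

    num_onnx = select_model_inputs_outputs(model_onnx, "variable1")
    save_onnx_model(num_onnx, "pipeline_titanic_numerical.onnx")

    sess = rt.InferenceSession(
        "pipeline_titanic_numerical.onnx",
        providers=["CPUExecutionProvider"])
    numerical = sess.run(None, inputs)
    print("numerical features", numerical[0][:1])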
-.. GENERATED FROM PYTHON SOURCE LINES 93-99 +.. GENERATED FROM PYTHON SOURCE LINES 90-96 Define the inputs of the ONNX graph +++++++++++++++++++++++++++++++++++ @@ -163,7 +160,7 @@ Define the inputs of the ONNX graph but it needs to know which feature has which name. We simply reuse the dataframe column definition. -.. GENERATED FROM PYTHON SOURCE LINES 99-101 +.. GENERATED FROM PYTHON SOURCE LINES 96-98 .. code-block:: default @@ -175,8 +172,6 @@ We simply reuse the dataframe column definition. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none pclass int64 @@ -197,11 +192,11 @@ We simply reuse the dataframe column definition. -.. GENERATED FROM PYTHON SOURCE LINES 102-103 +.. GENERATED FROM PYTHON SOURCE LINES 99-100 After conversion. -.. GENERATED FROM PYTHON SOURCE LINES 103-124 +.. GENERATED FROM PYTHON SOURCE LINES 100-121 .. code-block:: default @@ -232,8 +227,6 @@ After conversion. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [('pclass', Int64TensorType(shape=[None, 1])), @@ -253,18 +246,18 @@ After conversion. -.. GENERATED FROM PYTHON SOURCE LINES 125-128 +.. GENERATED FROM PYTHON SOURCE LINES 122-125 Merging single column into vectors is not the most efficient way to compute the prediction. It could be done before converting the pipeline into a graph. -.. GENERATED FROM PYTHON SOURCE LINES 130-132 +.. GENERATED FROM PYTHON SOURCE LINES 127-129 Convert the pipeline into ONNX ++++++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 132-139 +.. GENERATED FROM PYTHON SOURCE LINES 129-136 .. code-block:: default @@ -282,13 +275,13 @@ Convert the pipeline into ONNX -.. GENERATED FROM PYTHON SOURCE LINES 140-143 +.. GENERATED FROM PYTHON SOURCE LINES 137-140 *scikit-learn* does implicit conversions when it can. *sklearn-onnx* does not. The ONNX version of *OneHotEncoder* must be applied on columns of the same type. -.. GENERATED FROM PYTHON SOURCE LINES 143-158 +.. GENERATED FROM PYTHON SOURCE LINES 140-155 .. code-block:: default @@ -314,7 +307,7 @@ must be applied on columns of the same type. -.. GENERATED FROM PYTHON SOURCE LINES 159-165 +.. GENERATED FROM PYTHON SOURCE LINES 156-162 Compare the predictions +++++++++++++++++++++++ @@ -323,7 +316,7 @@ Final step, we need to ensure the converted model produces the same predictions, labels and probabilities. Let's start with *scikit-learn*. -.. GENERATED FROM PYTHON SOURCE LINES 165-169 +.. GENERATED FROM PYTHON SOURCE LINES 162-166 .. code-block:: default @@ -337,17 +330,15 @@ Let's start with *scikit-learn*. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [0 1 0 0 0] - predict_proba [[0.82091143 0.17908857]] + predict [0 1 0 1 0] + predict_proba [[0.76688174 0.23311826]] -.. GENERATED FROM PYTHON SOURCE LINES 170-179 +.. GENERATED FROM PYTHON SOURCE LINES 167-176 Predictions with onnxruntime. We need to remove the dropped columns and to change @@ -359,7 +350,7 @@ Last detail, every column was described not really as a vector but as a matrix of one column which explains the last line with the *reshape*. -.. GENERATED FROM PYTHON SOURCE LINES 179-187 +.. GENERATED FROM PYTHON SOURCE LINES 176-184 .. code-block:: default @@ -378,11 +369,11 @@ with the *reshape*. -.. GENERATED FROM PYTHON SOURCE LINES 188-189 +.. GENERATED FROM PYTHON SOURCE LINES 185-186 We are ready to run *onnxruntime*. -.. GENERATED FROM PYTHON SOURCE LINES 189-196 +.. GENERATED FROM PYTHON SOURCE LINES 186-193 .. code-block:: default @@ -399,17 +390,15 @@ We are ready to run *onnxruntime*. .. 
rst-class:: sphx-glr-script-out - Out: - .. code-block:: none predict [0 1 0 0 0] - predict_proba [{0: 0.9220679998397827, 1: 0.07793202996253967}] + predict_proba [{0: 0.9036112427711487, 1: 0.09638875722885132}] -.. GENERATED FROM PYTHON SOURCE LINES 197-204 +.. GENERATED FROM PYTHON SOURCE LINES 194-201 Compute intermediate outputs ++++++++++++++++++++++++++++ @@ -419,7 +408,7 @@ Unfortunately, there is actually no way to ask We need to modifies the *ONNX* before it is given to *onnxruntime*. Let's see first the list of intermediate output. -.. GENERATED FROM PYTHON SOURCE LINES 204-209 +.. GENERATED FROM PYTHON SOURCE LINES 201-206 .. code-block:: default @@ -434,33 +423,27 @@ Let's see first the list of intermediate output. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - pclassout - sexout + merged_columns embarkedout - fare_cast - age_cast + sexout + pclassout concat_result - merged_columns variable variable2 variable1 - variable2_cast - variable1_cast transformed_column label probability_tensor - probabilities output_label + probabilities output_probability -.. GENERATED FROM PYTHON SOURCE LINES 210-216 +.. GENERATED FROM PYTHON SOURCE LINES 207-213 Not that easy to tell which one is what as the *ONNX* has more operators than the original *scikit-learn* pipelines. @@ -469,7 +452,7 @@ helps up to find the outputs of both numerical and textual pipeline: *variable1*, *variable2*. Let's look into the numerical pipeline first. -.. GENERATED FROM PYTHON SOURCE LINES 216-220 +.. GENERATED FROM PYTHON SOURCE LINES 213-217 .. code-block:: default @@ -483,20 +466,18 @@ Let's look into the numerical pipeline first. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - b'\x08\x07\x12\x08skl2onnx\x1a\x061.11.2"\x07ai.onnx(\x002\x00:\xae\x04\n^\n\x08variable\x12\tvariable1\x1a\x06Scaler"\x06Scaler*\x15\n\x06offset=\xb8\xe3\xeaA=\x90\xa0\x08B\xa0\x01\x06*\x14\n\x05scale=1\x96\x9d==\xb2_\x97<\xa0\x01\x06:\nai.onnx.ml\n}\n\x0emerged_columns\x12\x08variable\x1a\x07Imputer"\x07Imputer*#\n\x14imputed_value_floats=\x00\x00\xe0A=\x00\x00hA\xa0\x01\x06*\x1e\n\x14replaced_value_float\x15\x00\x00\xc0\x7f\xa0\x01\x01:\nai.onnx.ml\nD\n\x08age_cast\n\tfare_cast\x12\x0emerged_columns\x1a\x06Concat"\x06Concat*\x0b\n\x04axis\x18\x01\xa0\x01\x02:\x00\n(\n\x03age\x12\x08age_cast\x1a\x04Cast"\x04Cast*\t\n\x02to\x18\x01\xa0\x01\x02:\x00\n+\n\x04fare\x12\tfare_cast\x1a\x05Cast1"\x04Cast*\t\n\x02to\x18\x01\xa0\x01\x02:\x00\x12\x10pipeline_titanic*\x1f\x08\x02\x10\x07:\x0b\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\tB\x0cshape_tensorZ\x16\n\x06pclass\x12\x0c\n\n\x08\x08\x12\x06\n\x00\n\x02\x08\x01Z\x13\n\x03sex\x12\x0c\n\n\x08\x08\x12\x06\n\x00\n\x02\x08\x01Z\x13\n\x03age\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\x01Z\x14\n\x04fare\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\x01Z\x18\n\x08embarked\x12\x0c\n\n\x08\x08\x12\x06\n\x00\n\x02\x08\x01b\x0b\n\tvariable1B\x04\n\x00\x10\x0bB\x0e\n\nai.onnx.ml\x10\x01' + 
b'\x08\x07\x12\x08skl2onnx\x1a\x061.14.0"\x07ai.onnx(\x002\x00:\xcd\x03\n:\n\x03age\n\x04fare\x12\x0emerged_columns\x1a\x06Concat"\x06Concat*\x0b\n\x04axis\x18\x01\xa0\x01\x02:\x00\n}\n\x0emerged_columns\x12\x08variable\x1a\x07Imputer"\x07Imputer*#\n\x14imputed_value_floats=\x00\x00\xe0A=\xcdLgA\xa0\x01\x06*\x1e\n\x14replaced_value_float\x15\x00\x00\xc0\x7f\xa0\x01\x01:\nai.onnx.ml\n^\n\x08variable\x12\tvariable1\x1a\x06Scaler"\x06Scaler*\x15\n\x06offset=l\xde\xebA=J\xad\x07B\xa0\x01\x06*\x14\n\x05scale=\x88w\x9b==\x98\xca\x97<\xa0\x01\x06:\nai.onnx.ml\x12\x10pipeline_titanic*\x1f\x08\x02\x10\x07:\x0b\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\tB\x0cshape_tensorZ\x16\n\x06pclass\x12\x0c\n\n\x08\x08\x12\x06\n\x00\n\x02\x08\x01Z\x13\n\x03sex\x12\x0c\n\n\x08\x08\x12\x06\n\x00\n\x02\x08\x01Z\x13\n\x03age\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\x01Z\x14\n\x04fare\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\x01Z\x18\n\x08embarked\x12\x0c\n\n\x08\x08\x12\x06\n\x00\n\x02\x08\x01b\x0b\n\tvariable1B\x0e\n\nai.onnx.ml\x10\x01B\x04\n\x00\x10\x0b' -.. GENERATED FROM PYTHON SOURCE LINES 221-222 +.. GENERATED FROM PYTHON SOURCE LINES 218-219 Let's compute the numerical features. -.. GENERATED FROM PYTHON SOURCE LINES 222-227 +.. GENERATED FROM PYTHON SOURCE LINES 219-224 .. code-block:: default @@ -511,20 +492,18 @@ Let's compute the numerical features. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numerical features [[-0.02779241 -0.48240793]] + numerical features [[ 0.19102357 -0.4848954 ]] -.. GENERATED FROM PYTHON SOURCE LINES 228-229 +.. GENERATED FROM PYTHON SOURCE LINES 225-226 We do the same for the textual features. -.. GENERATED FROM PYTHON SOURCE LINES 229-237 +.. GENERATED FROM PYTHON SOURCE LINES 226-234 .. code-block:: default @@ -542,27 +521,39 @@ We do the same for the textual features. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none ir_version: 7 producer_name: "skl2onnx" - producer_version: "1.11.2" + producer_version: "1.14.0" domain: "ai.onnx" model_version: 0 doc_string: "" graph { node { - input: "pclass" - output: "pclassout" - name: "OneHotEncoder2" + input: "age" + input: "fare" + output: "merged_columns" + name: "Concat" + op_type: "Concat" + attribute { + name: "axis" + i: 1 + type: INT + } + domain: "" + } + node { + input: "embarked" + output: "embarkedout" + name: "OneHotEncoder" op_type: "OneHotEncoder" attribute { name: "cats_strings" - strings: "1" - strings: "2" - strings: "3" + strings: "C" + strings: "Q" + strings: "S" + strings: "missing" type: STRINGS } attribute { @@ -591,16 +582,15 @@ We do the same for the textual features. domain: "ai.onnx.ml" } node { - input: "embarked" - output: "embarkedout" - name: "OneHotEncoder" + input: "pclass" + output: "pclassout" + name: "OneHotEncoder2" op_type: "OneHotEncoder" attribute { name: "cats_strings" - strings: "C" - strings: "Q" - strings: "S" - strings: "missing" + strings: "1" + strings: "2" + strings: "3" type: STRINGS } attribute { @@ -610,30 +600,6 @@ We do the same for the textual features. } domain: "ai.onnx.ml" } - node { - input: "fare" - output: "fare_cast" - name: "Cast1" - op_type: "Cast" - attribute { - name: "to" - i: 1 - type: INT - } - domain: "" - } - node { - input: "age" - output: "age_cast" - name: "Cast" - op_type: "Cast" - attribute { - name: "to" - i: 1 - type: INT - } - domain: "" - } node { input: "embarkedout" input: "sexout" @@ -648,19 +614,6 @@ We do the same for the textual features. 
} domain: "" } - node { - input: "age_cast" - input: "fare_cast" - output: "merged_columns" - name: "Concat" - op_type: "Concat" - attribute { - name: "axis" - i: 1 - type: INT - } - domain: "" - } node { input: "merged_columns" output: "variable" @@ -669,7 +622,7 @@ We do the same for the textual features. attribute { name: "imputed_value_floats" floats: 28.0 - floats: 14.5 + floats: 14.456250190734863 type: FLOATS } attribute { @@ -694,45 +647,21 @@ We do the same for the textual features. op_type: "Scaler" attribute { name: "offset" - floats: 29.361190795898438 - floats: 34.15679931640625 + floats: 29.483604431152344 + floats: 33.919227600097656 type: FLOATS } attribute { name: "scale" - floats: 0.07694662362337112 - floats: 0.01847824826836586 + floats: 0.07591158151626587 + floats: 0.018529221415519714 type: FLOATS } domain: "ai.onnx.ml" } - node { - input: "variable2" - output: "variable2_cast" - name: "Cast3" - op_type: "Cast" - attribute { - name: "to" - i: 1 - type: INT - } - domain: "" - } node { input: "variable1" - output: "variable1_cast" - name: "Cast2" - op_type: "Cast" - attribute { - name: "to" - i: 1 - type: INT - } - domain: "" - } - node { - input: "variable1_cast" - input: "variable2_cast" + input: "variable2" output: "transformed_column" name: "Concat2" op_type: "Concat" @@ -757,34 +686,34 @@ We do the same for the textual features. } attribute { name: "coefficients" - floats: 0.39770302176475525 - floats: -0.07411571592092514 - floats: -0.3739332854747772 - floats: 0.18564870953559875 - floats: 0.4129030704498291 - floats: -0.22461935877799988 - floats: -1.2912691831588745 - floats: 1.2912683486938477 - floats: -0.9662848711013794 - floats: 0.018037281930446625 - floats: 0.9482467770576477 - floats: -0.39770302176475525 - floats: 0.07411571592092514 - floats: 0.3739332854747772 - floats: -0.18564870953559875 - floats: -0.4129030704498291 - floats: 0.22461935877799988 - floats: 1.2912691831588745 - floats: -1.2912683486938477 - floats: 0.9662848711013794 - floats: -0.018037281930446625 - floats: -0.9482467770576477 + floats: 0.4584773778915405 + floats: 0.025607185438275337 + floats: -0.3215447962284088 + floats: 0.1725417822599411 + floats: 0.3893167972564697 + floats: -0.24189656972885132 + floats: -1.226542353630066 + floats: 1.2249596118927002 + floats: -1.034958839416504 + floats: -0.013846348039805889 + floats: 1.047222375869751 + floats: -0.4584773778915405 + floats: -0.025607185438275337 + floats: 0.3215447962284088 + floats: -0.1725417822599411 + floats: -0.3893167972564697 + floats: 0.24189656972885132 + floats: 1.226542353630066 + floats: -1.2249596118927002 + floats: 1.034958839416504 + floats: 0.013846348039805889 + floats: -1.047222375869751 type: FLOATS } attribute { name: "intercepts" - floats: -0.20633748173713684 - floats: 0.20633748173713684 + floats: -0.2818778455257416 + floats: 0.2818778455257416 type: FLOATS } attribute { @@ -799,6 +728,18 @@ We do the same for the textual features. } domain: "ai.onnx.ml" } + node { + input: "label" + output: "output_label" + name: "Cast" + op_type: "Cast" + attribute { + name: "to" + i: 7 + type: INT + } + domain: "" + } node { input: "probability_tensor" output: "probabilities" @@ -811,18 +752,6 @@ We do the same for the textual features. 
} domain: "ai.onnx.ml" } - node { - input: "label" - output: "output_label" - name: "Cast4" - op_type: "Cast" - attribute { - name: "to" - i: 7 - type: INT - } - domain: "" - } node { input: "probabilities" output: "output_probability" @@ -949,28 +878,28 @@ We do the same for the textual features. } } } - opset_import { - domain: "" - version: 11 - } opset_import { domain: "ai.onnx.ml" version: 1 } + opset_import { + domain: "" + version: 11 + } - textual features [[0. 0. 1. 0. 0. 1. 0. 0. 1.]] + textual features [[0. 1. 0. 0. 0. 1. 0. 0. 1.]] -.. GENERATED FROM PYTHON SOURCE LINES 238-242 +.. GENERATED FROM PYTHON SOURCE LINES 235-239 Display the sub-ONNX graph ++++++++++++++++++++++++++ Finally, let's see both subgraphs. First, numerical pipeline. -.. GENERATED FROM PYTHON SOURCE LINES 242-256 +.. GENERATED FROM PYTHON SOURCE LINES 239-253 .. code-block:: default @@ -999,20 +928,18 @@ Finally, let's see both subgraphs. First, numerical pipeline. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 4372.5, 1033.5, -0.5) + (-0.5, 1229.5, 2558.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 257-258 +.. GENERATED FROM PYTHON SOURCE LINES 254-255 Then textual pipeline. -.. GENERATED FROM PYTHON SOURCE LINES 258-272 +.. GENERATED FROM PYTHON SOURCE LINES 255-269 .. code-block:: default @@ -1041,20 +968,18 @@ Then textual pipeline. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 7086.5, 1121.5, -0.5) + (-0.5, 5630.5, 2735.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 273-274 +.. GENERATED FROM PYTHON SOURCE LINES 270-271 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 274-280 +.. GENERATED FROM PYTHON SOURCE LINES 271-277 .. code-block:: default @@ -1070,15 +995,13 @@ Then textual pipeline. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -1086,35 +1009,23 @@ Then textual pipeline. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 3.518 seconds) + **Total running time of the script:** ( 0 minutes 4.105 seconds) .. _sphx_glr_download_auto_examples_plot_intermediate_outputs.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_intermediate_outputs.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_intermediate_outputs.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_intermediate_outputs.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_intermediate_outputs.ipynb ` + :download:`Download Jupyter notebook: plot_intermediate_outputs.ipynb ` .. 
only:: html diff --git a/_sources/auto_examples/plot_investigate_pipeline.rst.txt b/_sources/auto_examples/plot_investigate_pipeline.rst.txt index a7703ba64..d12fc9ffb 100644 --- a/_sources/auto_examples/plot_investigate_pipeline.rst.txt +++ b/_sources/auto_examples/plot_investigate_pipeline.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_investigate_pipeline.py" +.. "auto_examples/plot_investigate_pipeline.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -24,9 +24,6 @@ Investigate a pipeline The following example shows how to look into a converted models and easily find errors at every step of the pipeline. -.. contents:: - :local: - Create a pipeline +++++++++++++++++ @@ -41,7 +38,7 @@ Operators-ml.md#ai.onnx.ml.Imputer>`_ does not handle string type. This cannot be part of the final ONNX pipeline and must be removed. Look for comment starting with ``---`` below. -.. GENERATED FROM PYTHON SOURCE LINES 28-54 +.. GENERATED FROM PYTHON SOURCE LINES 25-51 .. code-block:: default @@ -77,11 +74,9 @@ and must be removed. Look for comment starting with ``---`` below. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - D:\Program Files\Python\Python39\lib\site-packages\sklearn\linear_model\_logistic.py:444: ConvergenceWarning: lbfgs failed to converge (status=1): + /home/xadupre/github/scikit-learn/sklearn/linear_model/_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT. Increase the number of iterations (max_iter) or scale the data as shown in: @@ -99,12 +94,12 @@ and must be removed. Look for comment starting with ``---`` below.

-.. GENERATED FROM PYTHON SOURCE LINES 55-57 +.. GENERATED FROM PYTHON SOURCE LINES 52-54 Conversion to ONNX ++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 57-71 +.. GENERATED FROM PYTHON SOURCE LINES 54-68 .. code-block:: default @@ -128,29 +123,27 @@ Conversion to ONNX .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none skl predict_proba - [[9.99998536e-01 5.99063801e-19 3.48549300e-10 1.55765866e-08 - 3.32560027e-10 1.21314773e-06 3.98960248e-08 1.22513933e-07 - 2.23871298e-08 4.98148709e-08] - [1.47648539e-14 9.99999301e-01 1.05811971e-10 7.49298736e-13 - 2.48627484e-07 8.75686177e-12 5.39025200e-11 2.95899979e-11 - 4.50529114e-07 1.30607573e-13]] + [[9.99998536e-01 5.99063158e-19 3.48548953e-10 1.55765726e-08 + 3.32559745e-10 1.21314653e-06 3.98959930e-08 1.22513839e-07 + 2.23871272e-08 4.98148509e-08] + [1.47648437e-14 9.99999301e-01 1.05811967e-10 7.49298733e-13 + 2.48627417e-07 8.75686484e-12 5.39025135e-11 2.95899938e-11 + 4.50528833e-07 1.30607478e-13]] onnx predict_proba [[9.99998569e-01 5.99062501e-19 3.48550355e-10 1.55766493e-08 - 3.32561811e-10 1.21315361e-06 3.98961930e-08 1.22514706e-07 - 2.23872068e-08 4.98151529e-08] - [1.47648956e-14 9.99999285e-01 1.05811790e-10 7.49297488e-13 - 2.48627885e-07 8.75685548e-12 5.39024415e-11 2.95900075e-11 + 3.32561811e-10 1.21315134e-06 3.98961930e-08 1.22514706e-07 + 2.23872494e-08 4.98151529e-08] + [1.47648956e-14 9.99999285e-01 1.05811991e-10 7.49297488e-13 + 2.48627885e-07 8.75685548e-12 5.39024415e-11 2.95899520e-11 4.50529058e-07 1.30607344e-13]] -.. GENERATED FROM PYTHON SOURCE LINES 72-80 +.. GENERATED FROM PYTHON SOURCE LINES 69-77 Intermediate steps ++++++++++++++++++ @@ -161,7 +154,7 @@ is failing. The following method modifies the scikit-learn pipeline to steal the intermediate outputs and produces an smaller ONNX graph for every operator. -.. GENERATED FROM PYTHON SOURCE LINES 80-100 +.. GENERATED FROM PYTHON SOURCE LINES 77-97 .. code-block:: default @@ -191,8 +184,6 @@ an smaller ONNX graph for every operator. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none step 1 @@ -217,7 +208,7 @@ an smaller ONNX graph for every operator. -1.29285214e-01, -8.14384613e-02, -3.82919696e-02, -9.76885583e-03, -1.39046240e-02, 1.59100433e-03, -2.87444919e-03, 5.75119957e-03, 1.85595427e-03, - -5.00911047e-03, 1.73372230e-15, 2.65719735e-16, + -5.00911047e-03, -2.53068224e-15, -6.30369386e-16, -9.16970102e-16], [ 1.54267314e+01, -4.91291516e+00, 1.74676972e+01, -1.13960509e+01, 5.64555024e+00, -5.73696034e+00, @@ -239,7 +230,7 @@ an smaller ONNX graph for every operator. 1.91237218e-01, -4.73950435e-02, 2.74122911e-02, 4.32524378e-03, -3.66956686e-03, -1.88790754e-03, 5.22119207e-03, -1.86775268e-03, -5.07041881e-03, - -1.70805502e-03, -1.24937188e-15, -1.97574356e-16, + -1.70805502e-03, 1.87088367e-15, -3.01154459e-15, 2.24048193e-16]])} onnx outputs [array([[-9.78696918e+00, 7.22639418e+00, -2.16935596e+01, @@ -262,7 +253,7 @@ an smaller ONNX graph for every operator. -1.29285216e-01, -8.14384818e-02, -3.82919535e-02, -9.76885669e-03, -1.39046200e-02, 1.59100525e-03, -2.87444773e-03, 5.75120188e-03, 1.85595278e-03, - -5.00911009e-03, 1.73372186e-15, 2.65719874e-16, + -5.00911009e-03, -2.53068203e-15, -6.30369331e-16, -9.16970128e-16], [ 1.54267330e+01, -4.91291523e+00, 1.74676971e+01, -1.13960505e+01, 5.64554977e+00, -5.73695993e+00, @@ -284,33 +275,33 @@ an smaller ONNX graph for every operator. 
1.91237196e-01, -4.73950393e-02, 2.74122953e-02, 4.32524411e-03, -3.66956298e-03, -1.88790704e-03, 5.22119273e-03, -1.86775194e-03, -5.07041626e-03, - -1.70805526e-03, -1.24937190e-15, -1.97574326e-16, + -1.70805526e-03, 1.87088423e-15, -3.01154475e-15, 2.24048182e-16]], dtype=float32)] step 1 skl outputs - {'decision_function': array([[9.99998536e-01, 5.99063801e-19, 3.48549300e-10, 1.55765866e-08, - 3.32560027e-10, 1.21314773e-06, 3.98960248e-08, 1.22513933e-07, - 2.23871298e-08, 4.98148709e-08], - [1.47648539e-14, 9.99999301e-01, 1.05811971e-10, 7.49298736e-13, - 2.48627484e-07, 8.75686177e-12, 5.39025200e-11, 2.95899979e-11, - 4.50529114e-07, 1.30607573e-13]]), 'predict_proba': array([[9.99998536e-01, 5.99063801e-19, 3.48549300e-10, 1.55765866e-08, - 3.32560027e-10, 1.21314773e-06, 3.98960248e-08, 1.22513933e-07, - 2.23871298e-08, 4.98148709e-08], - [1.47648539e-14, 9.99999301e-01, 1.05811971e-10, 7.49298736e-13, - 2.48627484e-07, 8.75686177e-12, 5.39025200e-11, 2.95899979e-11, - 4.50529114e-07, 1.30607573e-13]])} + {'decision_function': array([[9.99998536e-01, 5.99063158e-19, 3.48548953e-10, 1.55765726e-08, + 3.32559745e-10, 1.21314653e-06, 3.98959930e-08, 1.22513839e-07, + 2.23871272e-08, 4.98148509e-08], + [1.47648437e-14, 9.99999301e-01, 1.05811967e-10, 7.49298733e-13, + 2.48627417e-07, 8.75686484e-12, 5.39025135e-11, 2.95899938e-11, + 4.50528833e-07, 1.30607478e-13]]), 'predict_proba': array([[9.99998536e-01, 5.99063158e-19, 3.48548953e-10, 1.55765726e-08, + 3.32559745e-10, 1.21314653e-06, 3.98959930e-08, 1.22513839e-07, + 2.23871272e-08, 4.98148509e-08], + [1.47648437e-14, 9.99999301e-01, 1.05811967e-10, 7.49298733e-13, + 2.48627417e-07, 8.75686484e-12, 5.39025135e-11, 2.95899938e-11, + 4.50528833e-07, 1.30607478e-13]])} onnx outputs [array([0, 1], dtype=int64), array([[9.9999857e-01, 5.9906250e-19, 3.4855036e-10, 1.5576649e-08, - 3.3256181e-10, 1.2131536e-06, 3.9896193e-08, 1.2251471e-07, - 2.2387207e-08, 4.9815153e-08], - [1.4764896e-14, 9.9999928e-01, 1.0581179e-10, 7.4929749e-13, - 2.4862788e-07, 8.7568555e-12, 5.3902442e-11, 2.9590008e-11, + 3.3256181e-10, 1.2131513e-06, 3.9896193e-08, 1.2251471e-07, + 2.2387249e-08, 4.9815153e-08], + [1.4764896e-14, 9.9999928e-01, 1.0581199e-10, 7.4929749e-13, + 2.4862788e-07, 8.7568555e-12, 5.3902442e-11, 2.9589952e-11, 4.5052906e-07, 1.3060734e-13]], dtype=float32)] -.. GENERATED FROM PYTHON SOURCE LINES 101-108 +.. GENERATED FROM PYTHON SOURCE LINES 98-105 Pickle ++++++ @@ -320,7 +311,7 @@ It can be pickle independetly from the others. Attribute *_debug* contains all the information needed to *replay* the prediction of the model. -.. GENERATED FROM PYTHON SOURCE LINES 108-126 +.. GENERATED FROM PYTHON SOURCE LINES 105-123 .. code-block:: default @@ -348,25 +339,23 @@ needed to *replay* the prediction of the model. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [[9.99998536e-01 5.99063801e-19 3.48549300e-10 1.55765866e-08 - 3.32560027e-10 1.21314773e-06 3.98960248e-08 1.22513933e-07 - 2.23871298e-08 4.98148709e-08] - [1.47648539e-14 9.99999301e-01 1.05811971e-10 7.49298736e-13 - 2.48627484e-07 8.75686177e-12 5.39025200e-11 2.95899979e-11 - 4.50529114e-07 1.30607573e-13]] + [[9.99998536e-01 5.99063158e-19 3.48548953e-10 1.55765726e-08 + 3.32559745e-10 1.21314653e-06 3.98959930e-08 1.22513839e-07 + 2.23871272e-08 4.98148509e-08] + [1.47648437e-14 9.99999301e-01 1.05811967e-10 7.49298733e-13 + 2.48627417e-07 8.75686484e-12 5.39025135e-11 2.95899938e-11 + 4.50528833e-07 1.30607478e-13]] -.. 
GENERATED FROM PYTHON SOURCE LINES 127-128 +.. GENERATED FROM PYTHON SOURCE LINES 124-125 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 128-134 +.. GENERATED FROM PYTHON SOURCE LINES 125-131 .. code-block:: default @@ -382,15 +371,13 @@ needed to *replay* the prediction of the model. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -398,35 +385,23 @@ needed to *replay* the prediction of the model. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.402 seconds) + **Total running time of the script:** ( 0 minutes 0.274 seconds) .. _sphx_glr_download_auto_examples_plot_investigate_pipeline.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_investigate_pipeline.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_investigate_pipeline.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_investigate_pipeline.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_investigate_pipeline.ipynb ` + :download:`Download Jupyter notebook: plot_investigate_pipeline.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_logging.rst.txt b/_sources/auto_examples/plot_logging.rst.txt index 69b0d1557..2a9b58619 100644 --- a/_sources/auto_examples/plot_logging.rst.txt +++ b/_sources/auto_examples/plot_logging.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_logging.py" +.. "auto_examples/plot_logging.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -28,17 +28,13 @@ associated converter. It may also fails if one of the object is mapped by a custom converter. If the error message is not explicit enough, it is possible to enable logging. - -.. contents:: - :local: - Train a model +++++++++++++ A very basic example using random forest and the iris dataset. -.. GENERATED FROM PYTHON SOURCE LINES 25-45 +.. GENERATED FROM PYTHON SOURCE LINES 21-41 .. code-block:: default @@ -68,8 +64,6 @@ the iris dataset. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none DecisionTreeClassifier() @@ -77,12 +71,12 @@ the iris dataset. -.. GENERATED FROM PYTHON SOURCE LINES 46-48 +.. GENERATED FROM PYTHON SOURCE LINES 42-44 Convert a model into ONNX +++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 48-61 +.. GENERATED FROM PYTHON SOURCE LINES 44-57 .. code-block:: default @@ -105,17 +99,15 @@ Convert a model into ONNX .. rst-class:: sphx-glr-script-out - Out: - .. 
code-block:: none - [2 1 1 0 1 0 1 0 1 1 2 1 0 0 0 1 0 0 2 0 0 1 0 0 2 2 1 2 0 2 0 1 0 2 2 1 2 - 2] + [1 0 1 1 2 0 2 0 2 0 1 1 1 0 2 0 2 1 2 1 2 0 0 0 2 0 0 0 2 1 2 1 1 0 2 2 2 + 1] -.. GENERATED FROM PYTHON SOURCE LINES 62-69 +.. GENERATED FROM PYTHON SOURCE LINES 58-65 Conversion with parameter verbose +++++++++++++++++++++++++++++++++ @@ -125,7 +117,7 @@ It tells which converter is called. `verbose=1` usually means what *skl2onnx* is doing to convert a pipeline. `verbose=2+` is reserved for information within converters. -.. GENERATED FROM PYTHON SOURCE LINES 69-72 +.. GENERATED FROM PYTHON SOURCE LINES 65-68 .. code-block:: default @@ -138,8 +130,6 @@ is reserved for information within converters. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [convert_sklearn] parse_sklearn_model @@ -152,13 +142,13 @@ is reserved for information within converters. [convert_operators] iteration 2 - n_vars=5 n_ops=2 [convert_operators] end iter: 2 - n_vars=5 [convert_operators] end. - [_update_domain_version] +opset 0: name='', version=9 - [_update_domain_version] +opset 1: name='ai.onnx.ml', version=1 + [_update_domain_version] +opset 0: name='ai.onnx.ml', version=1 + [_update_domain_version] +opset 1: name='', version=9 [convert_sklearn] end ir_version: 7 producer_name: "skl2onnx" - producer_version: "1.11.2" + producer_version: "1.14.0" domain: "ai.onnx" model_version: 0 doc_string: "" @@ -186,18 +176,6 @@ is reserved for information within converters. ints: 0 ints: 1 ints: 2 - ints: 0 - ints: 1 - ints: 2 - ints: 0 - ints: 1 - ints: 2 - ints: 0 - ints: 1 - ints: 2 - ints: 0 - ints: 1 - ints: 2 type: INTS } attribute { @@ -205,30 +183,18 @@ is reserved for information within converters. ints: 1 ints: 1 ints: 1 - ints: 5 - ints: 5 - ints: 5 + ints: 4 + ints: 4 + ints: 4 ints: 6 ints: 6 ints: 6 + ints: 7 + ints: 7 + ints: 7 ints: 8 ints: 8 ints: 8 - ints: 10 - ints: 10 - ints: 10 - ints: 11 - ints: 11 - ints: 11 - ints: 14 - ints: 14 - ints: 14 - ints: 15 - ints: 15 - ints: 15 - ints: 16 - ints: 16 - ints: 16 type: INTS } attribute { @@ -248,18 +214,6 @@ is reserved for information within converters. ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -274,23 +228,11 @@ is reserved for information within converters. floats: 0.0 floats: 1.0 floats: 0.0 - floats: 0.0 - floats: 1.0 - floats: 0.0 floats: 1.0 floats: 0.0 floats: 0.0 floats: 0.0 floats: 1.0 - floats: 0.0 - floats: 1.0 - floats: 0.0 - floats: 0.0 - floats: 0.0 - floats: 1.0 - floats: 0.0 - floats: 0.0 - floats: 1.0 type: FLOATS } attribute { @@ -304,18 +246,10 @@ is reserved for information within converters. name: "nodes_falsenodeids" ints: 2 ints: 0 - ints: 12 - ints: 7 - ints: 6 - ints: 0 - ints: 0 - ints: 9 - ints: 0 - ints: 11 - ints: 0 + ints: 8 + ints: 5 ints: 0 - ints: 16 - ints: 15 + ints: 7 ints: 0 ints: 0 ints: 0 @@ -323,20 +257,12 @@ is reserved for information within converters. } attribute { name: "nodes_featureids" - ints: 3 - ints: 0 - ints: 3 ints: 2 - ints: 3 - ints: 0 ints: 0 ints: 3 - ints: 0 - ints: 2 - ints: 0 - ints: 0 ints: 2 ints: 0 + ints: 1 ints: 0 ints: 0 ints: 0 @@ -353,14 +279,6 @@ is reserved for information within converters. floats: 1.0 floats: 1.0 floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 type: FLOATS } attribute { @@ -374,14 +292,6 @@ is reserved for information within converters. 
ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -390,16 +300,8 @@ is reserved for information within converters. strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "LEAF" - strings: "LEAF" - strings: "BRANCH_LEQ" - strings: "LEAF" - strings: "BRANCH_LEQ" - strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" strings: "LEAF" @@ -416,14 +318,6 @@ is reserved for information within converters. ints: 6 ints: 7 ints: 8 - ints: 9 - ints: 10 - ints: 11 - ints: 12 - ints: 13 - ints: 14 - ints: 15 - ints: 16 type: INTS } attribute { @@ -437,14 +331,6 @@ is reserved for information within converters. ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -453,16 +339,8 @@ is reserved for information within converters. ints: 0 ints: 3 ints: 4 - ints: 5 ints: 0 - ints: 0 - ints: 8 - ints: 0 - ints: 10 - ints: 0 - ints: 0 - ints: 13 - ints: 14 + ints: 6 ints: 0 ints: 0 ints: 0 @@ -470,20 +348,12 @@ is reserved for information within converters. } attribute { name: "nodes_values" - floats: 0.800000011920929 + floats: 2.4499998092651367 floats: 0.0 floats: 1.75 floats: 4.949999809265137 - floats: 1.649999976158142 floats: 0.0 - floats: 0.0 - floats: 1.5499999523162842 - floats: 0.0 - floats: 5.449999809265137 - floats: 0.0 - floats: 0.0 - floats: 4.849999904632568 - floats: 5.949999809265137 + floats: 2.799999952316284 floats: 0.0 floats: 0.0 floats: 0.0 @@ -496,18 +366,6 @@ is reserved for information within converters. } domain: "ai.onnx.ml" } - node { - input: "label" - output: "output_label" - name: "Cast" - op_type: "Cast" - attribute { - name: "to" - i: 7 - type: INT - } - domain: "" - } node { input: "probabilities" output: "output_probability" @@ -522,7 +380,19 @@ is reserved for information within converters. } domain: "ai.onnx.ml" } - name: "59c8c63b49ab4ac29a457be5b1d38b67" + node { + input: "label" + output: "output_label" + name: "Cast" + op_type: "Cast" + attribute { + name: "to" + i: 7 + type: INT + } + domain: "" + } + name: "f5254896c3294cbca33a33798809c1db" input { name: "float_input" type { @@ -568,19 +438,19 @@ is reserved for information within converters. } } } - opset_import { - domain: "" - version: 9 - } opset_import { domain: "ai.onnx.ml" version: 1 } + opset_import { + domain: "" + version: 9 + } -.. GENERATED FROM PYTHON SOURCE LINES 73-80 +.. GENERATED FROM PYTHON SOURCE LINES 69-76 Conversion with logging +++++++++++++++++++++++ @@ -590,7 +460,7 @@ This is very detailed logging. It which operators or variables This information may be useful when a custom converter is being implemented. -.. GENERATED FROM PYTHON SOURCE LINES 80-87 +.. GENERATED FROM PYTHON SOURCE LINES 76-83 .. code-block:: default @@ -607,8 +477,6 @@ implemented. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none DEBUG:skl2onnx:[Var] +Variable('float_input', 'float_input', type=FloatTensorType(shape=[None, 4])) @@ -663,7 +531,7 @@ implemented. ir_version: 7 producer_name: "skl2onnx" - producer_version: "1.11.2" + producer_version: "1.14.0" domain: "ai.onnx" model_version: 0 doc_string: "" @@ -691,18 +559,6 @@ implemented. ints: 0 ints: 1 ints: 2 - ints: 0 - ints: 1 - ints: 2 - ints: 0 - ints: 1 - ints: 2 - ints: 0 - ints: 1 - ints: 2 - ints: 0 - ints: 1 - ints: 2 type: INTS } attribute { @@ -710,30 +566,18 @@ implemented. 
ints: 1 ints: 1 ints: 1 - ints: 5 - ints: 5 - ints: 5 + ints: 4 + ints: 4 + ints: 4 ints: 6 ints: 6 ints: 6 + ints: 7 + ints: 7 + ints: 7 ints: 8 ints: 8 ints: 8 - ints: 10 - ints: 10 - ints: 10 - ints: 11 - ints: 11 - ints: 11 - ints: 14 - ints: 14 - ints: 14 - ints: 15 - ints: 15 - ints: 15 - ints: 16 - ints: 16 - ints: 16 type: INTS } attribute { @@ -753,18 +597,6 @@ implemented. ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -779,20 +611,8 @@ implemented. floats: 0.0 floats: 1.0 floats: 0.0 - floats: 0.0 floats: 1.0 floats: 0.0 - floats: 1.0 - floats: 0.0 - floats: 0.0 - floats: 0.0 - floats: 1.0 - floats: 0.0 - floats: 1.0 - floats: 0.0 - floats: 0.0 - floats: 0.0 - floats: 1.0 floats: 0.0 floats: 0.0 floats: 1.0 @@ -809,18 +629,10 @@ implemented. name: "nodes_falsenodeids" ints: 2 ints: 0 - ints: 12 - ints: 7 - ints: 6 - ints: 0 - ints: 0 - ints: 9 - ints: 0 - ints: 11 - ints: 0 + ints: 8 + ints: 5 ints: 0 - ints: 16 - ints: 15 + ints: 7 ints: 0 ints: 0 ints: 0 @@ -828,20 +640,12 @@ implemented. } attribute { name: "nodes_featureids" - ints: 3 - ints: 0 - ints: 3 ints: 2 - ints: 3 - ints: 0 ints: 0 ints: 3 - ints: 0 - ints: 2 - ints: 0 - ints: 0 ints: 2 ints: 0 + ints: 1 ints: 0 ints: 0 ints: 0 @@ -858,14 +662,6 @@ implemented. floats: 1.0 floats: 1.0 floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 type: FLOATS } attribute { @@ -879,14 +675,6 @@ implemented. ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -895,17 +683,9 @@ implemented. strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" - strings: "BRANCH_LEQ" - strings: "LEAF" - strings: "LEAF" - strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "LEAF" strings: "LEAF" strings: "LEAF" type: STRINGS @@ -921,14 +701,6 @@ implemented. ints: 6 ints: 7 ints: 8 - ints: 9 - ints: 10 - ints: 11 - ints: 12 - ints: 13 - ints: 14 - ints: 15 - ints: 16 type: INTS } attribute { @@ -942,14 +714,6 @@ implemented. ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -958,16 +722,8 @@ implemented. ints: 0 ints: 3 ints: 4 - ints: 5 - ints: 0 ints: 0 - ints: 8 - ints: 0 - ints: 10 - ints: 0 - ints: 0 - ints: 13 - ints: 14 + ints: 6 ints: 0 ints: 0 ints: 0 @@ -975,20 +731,12 @@ implemented. } attribute { name: "nodes_values" - floats: 0.800000011920929 + floats: 2.4499998092651367 floats: 0.0 floats: 1.75 floats: 4.949999809265137 - floats: 1.649999976158142 - floats: 0.0 - floats: 0.0 - floats: 1.5499999523162842 floats: 0.0 - floats: 5.449999809265137 - floats: 0.0 - floats: 0.0 - floats: 4.849999904632568 - floats: 5.949999809265137 + floats: 2.799999952316284 floats: 0.0 floats: 0.0 floats: 0.0 @@ -1001,18 +749,6 @@ implemented. } domain: "ai.onnx.ml" } - node { - input: "label" - output: "output_label" - name: "Cast" - op_type: "Cast" - attribute { - name: "to" - i: 7 - type: INT - } - domain: "" - } node { input: "probabilities" output: "output_probability" @@ -1027,7 +763,19 @@ implemented. 
} domain: "ai.onnx.ml" } - name: "f4198d55cb1d476d9b780b2c4246c041" + node { + input: "label" + output: "output_label" + name: "Cast" + op_type: "Cast" + attribute { + name: "to" + i: 7 + type: INT + } + domain: "" + } + name: "8405ded92a3940539213ca75ace4f64e" input { name: "float_input" type { @@ -1073,23 +821,23 @@ implemented. } } } - opset_import { - domain: "" - version: 9 - } opset_import { domain: "ai.onnx.ml" version: 1 } + opset_import { + domain: "" + version: 9 + } -.. GENERATED FROM PYTHON SOURCE LINES 88-89 +.. GENERATED FROM PYTHON SOURCE LINES 84-85 And to disable it. -.. GENERATED FROM PYTHON SOURCE LINES 89-96 +.. GENERATED FROM PYTHON SOURCE LINES 85-92 .. code-block:: default @@ -1106,14 +854,12 @@ And to disable it. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none ir_version: 7 producer_name: "skl2onnx" - producer_version: "1.11.2" + producer_version: "1.14.0" domain: "ai.onnx" model_version: 0 doc_string: "" @@ -1141,18 +887,6 @@ And to disable it. ints: 0 ints: 1 ints: 2 - ints: 0 - ints: 1 - ints: 2 - ints: 0 - ints: 1 - ints: 2 - ints: 0 - ints: 1 - ints: 2 - ints: 0 - ints: 1 - ints: 2 type: INTS } attribute { @@ -1160,30 +894,18 @@ And to disable it. ints: 1 ints: 1 ints: 1 - ints: 5 - ints: 5 - ints: 5 + ints: 4 + ints: 4 + ints: 4 ints: 6 ints: 6 ints: 6 + ints: 7 + ints: 7 + ints: 7 ints: 8 ints: 8 ints: 8 - ints: 10 - ints: 10 - ints: 10 - ints: 11 - ints: 11 - ints: 11 - ints: 14 - ints: 14 - ints: 14 - ints: 15 - ints: 15 - ints: 15 - ints: 16 - ints: 16 - ints: 16 type: INTS } attribute { @@ -1203,18 +925,6 @@ And to disable it. ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -1229,23 +939,11 @@ And to disable it. floats: 0.0 floats: 1.0 floats: 0.0 - floats: 0.0 - floats: 1.0 - floats: 0.0 - floats: 1.0 - floats: 0.0 - floats: 0.0 - floats: 0.0 - floats: 1.0 - floats: 0.0 floats: 1.0 floats: 0.0 floats: 0.0 floats: 0.0 floats: 1.0 - floats: 0.0 - floats: 0.0 - floats: 1.0 type: FLOATS } attribute { @@ -1259,18 +957,10 @@ And to disable it. name: "nodes_falsenodeids" ints: 2 ints: 0 - ints: 12 - ints: 7 - ints: 6 - ints: 0 - ints: 0 - ints: 9 - ints: 0 - ints: 11 - ints: 0 + ints: 8 + ints: 5 ints: 0 - ints: 16 - ints: 15 + ints: 7 ints: 0 ints: 0 ints: 0 @@ -1278,20 +968,12 @@ And to disable it. } attribute { name: "nodes_featureids" - ints: 3 - ints: 0 - ints: 3 ints: 2 - ints: 3 - ints: 0 ints: 0 ints: 3 - ints: 0 - ints: 2 - ints: 0 - ints: 0 ints: 2 ints: 0 + ints: 1 ints: 0 ints: 0 ints: 0 @@ -1308,14 +990,6 @@ And to disable it. floats: 1.0 floats: 1.0 floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 type: FLOATS } attribute { @@ -1329,14 +1003,6 @@ And to disable it. ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -1345,18 +1011,10 @@ And to disable it. strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "LEAF" - strings: "LEAF" - strings: "BRANCH_LEQ" strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" - strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "LEAF" - strings: "LEAF" strings: "LEAF" type: STRINGS } @@ -1371,14 +1029,6 @@ And to disable it. 
ints: 6 ints: 7 ints: 8 - ints: 9 - ints: 10 - ints: 11 - ints: 12 - ints: 13 - ints: 14 - ints: 15 - ints: 16 type: INTS } attribute { @@ -1392,14 +1042,6 @@ And to disable it. ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -1408,16 +1050,8 @@ And to disable it. ints: 0 ints: 3 ints: 4 - ints: 5 - ints: 0 - ints: 0 - ints: 8 ints: 0 - ints: 10 - ints: 0 - ints: 0 - ints: 13 - ints: 14 + ints: 6 ints: 0 ints: 0 ints: 0 @@ -1425,20 +1059,12 @@ And to disable it. } attribute { name: "nodes_values" - floats: 0.800000011920929 + floats: 2.4499998092651367 floats: 0.0 floats: 1.75 floats: 4.949999809265137 - floats: 1.649999976158142 - floats: 0.0 - floats: 0.0 - floats: 1.5499999523162842 - floats: 0.0 - floats: 5.449999809265137 - floats: 0.0 floats: 0.0 - floats: 4.849999904632568 - floats: 5.949999809265137 + floats: 2.799999952316284 floats: 0.0 floats: 0.0 floats: 0.0 @@ -1451,18 +1077,6 @@ And to disable it. } domain: "ai.onnx.ml" } - node { - input: "label" - output: "output_label" - name: "Cast" - op_type: "Cast" - attribute { - name: "to" - i: 7 - type: INT - } - domain: "" - } node { input: "probabilities" output: "output_probability" @@ -1477,7 +1091,19 @@ And to disable it. } domain: "ai.onnx.ml" } - name: "b4a30720f5af4f539736e54e007cd4c2" + node { + input: "label" + output: "output_label" + name: "Cast" + op_type: "Cast" + attribute { + name: "to" + i: 7 + type: INT + } + domain: "" + } + name: "92d6517f5fb64e68bc6614269bf01c28" input { name: "float_input" type { @@ -1523,23 +1149,23 @@ And to disable it. } } } - opset_import { - domain: "" - version: 9 - } opset_import { domain: "ai.onnx.ml" version: 1 } + opset_import { + domain: "" + version: 9 + } -.. GENERATED FROM PYTHON SOURCE LINES 97-98 +.. GENERATED FROM PYTHON SOURCE LINES 93-94 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 98-104 +.. GENERATED FROM PYTHON SOURCE LINES 94-100 .. code-block:: default @@ -1555,15 +1181,13 @@ And to disable it. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -1571,35 +1195,23 @@ And to disable it. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.114 seconds) + **Total running time of the script:** ( 0 minutes 0.092 seconds) .. _sphx_glr_download_auto_examples_plot_logging.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_logging.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_logging.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_logging.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_logging.ipynb ` + :download:`Download Jupyter notebook: plot_logging.ipynb ` .. 
only:: html diff --git a/_sources/auto_examples/plot_metadata.rst.txt b/_sources/auto_examples/plot_metadata.rst.txt index 110e3d13a..90d1c5b01 100644 --- a/_sources/auto_examples/plot_metadata.rst.txt +++ b/_sources/auto_examples/plot_metadata.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_metadata.py" +.. "auto_examples/plot_metadata.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -62,8 +62,6 @@ logistic regression model trained with .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none doc_string= @@ -102,8 +100,6 @@ With *ONNX Runtime*: .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none custom_metadata_map={} @@ -136,15 +132,13 @@ With *ONNX Runtime*: .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -152,35 +146,23 @@ With *ONNX Runtime*: .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.082 seconds) + **Total running time of the script:** ( 0 minutes 0.077 seconds) .. _sphx_glr_download_auto_examples_plot_metadata.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_metadata.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_metadata.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_metadata.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_metadata.ipynb ` + :download:`Download Jupyter notebook: plot_metadata.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_nmf.rst.txt b/_sources/auto_examples/plot_nmf.rst.txt index c82cd5708..04b8bfcf4 100644 --- a/_sources/auto_examples/plot_nmf.rst.txt +++ b/_sources/auto_examples/plot_nmf.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_nmf.py" +.. "auto_examples/plot_nmf.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -34,13 +34,10 @@ The second case is more complex as it theoretically requires the estimation of a new matrix *W* with a gradient descent. -.. contents:: - :local: - Building a simple model +++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 28-61 +.. GENERATED FROM PYTHON SOURCE LINES 25-58 .. 
code-block:: default @@ -83,22 +80,20 @@ Building a simple model .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none original predictions - [(0, 0, 1.894058891169123), (0, 1, 0.10910649512440318), (0, 2, 0.30724384230279767), (0, 3, 0.30724384230279767), (1, 0, 1.0146702247021038), (1, 1, 0.984891447544794), (1, 2, 0.0), (1, 3, 0.0), (2, 0, 1.1066058655699118), (2, 1, 0.0), (2, 2, 0.19083275099868582), (2, 3, 0.19083275099868582), (3, 0, 1.1066058655699118), (3, 1, 0.0), (3, 2, 0.19083275099868582), (3, 3, 0.19083275099868582), (4, 0, 0.9470294455845615), (4, 1, 0.05455324756220159), (4, 2, 0.15362192115139883), (4, 3, 0.15362192115139883)] + [(0, 0, 1.8940573361824338), (0, 1, 0.30724412295249387), (0, 2, 0.30724412295249387), (0, 3, 0.10910977984398254), (1, 0, 1.106606957298055), (1, 1, 0.19083366185515177), (1, 2, 0.19083366185515177), (1, 3, 0.0), (2, 0, 1.106606957298055), (2, 1, 0.19083366185515177), (2, 2, 0.19083366185515177), (2, 3, 0.0), (3, 0, 1.0146708954543895), (3, 1, 0.0), (3, 2, 0.0), (3, 3, 0.9848905236525014), (4, 0, 0.9470286680912169), (4, 1, 0.15362206147624693), (4, 2, 0.15362206147624693), (4, 3, 0.05455488992199127)] -.. GENERATED FROM PYTHON SOURCE LINES 62-64 +.. GENERATED FROM PYTHON SOURCE LINES 59-61 Let's rewrite the prediction in a way it is closer to the function we need to convert into ONNX. -.. GENERATED FROM PYTHON SOURCE LINES 64-78 +.. GENERATED FROM PYTHON SOURCE LINES 61-75 .. code-block:: default @@ -122,16 +117,14 @@ to the function we need to convert into ONNX. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [(0, 0, 1.894058891169123), (0, 1, 0.10910649512440318), (0, 2, 0.30724384230279767), (0, 3, 0.30724384230279767), (1, 0, 1.0146702247021038), (1, 1, 0.984891447544794), (1, 2, 0.0), (1, 3, 0.0), (2, 0, 1.1066058655699118), (2, 1, 0.0), (2, 2, 0.19083275099868582), (2, 3, 0.19083275099868582), (3, 0, 1.1066058655699118), (3, 1, 0.0), (3, 2, 0.19083275099868582), (3, 3, 0.19083275099868582), (4, 0, 0.9470294455845615), (4, 1, 0.05455324756220159), (4, 2, 0.15362192115139883), (4, 3, 0.15362192115139883)] + [(0, 0, 1.8940573361824338), (0, 1, 0.30724412295249387), (0, 2, 0.30724412295249387), (0, 3, 0.10910977984398254), (1, 0, 1.106606957298055), (1, 1, 0.19083366185515177), (1, 2, 0.19083366185515177), (1, 3, 0.0), (2, 0, 1.106606957298055), (2, 1, 0.19083366185515177), (2, 2, 0.19083366185515177), (2, 3, 0.0), (3, 0, 1.0146708954543895), (3, 1, 0.0), (3, 2, 0.0), (3, 3, 0.9848905236525014), (4, 0, 0.9470286680912169), (4, 1, 0.15362206147624693), (4, 2, 0.15362206147624693), (4, 3, 0.05455488992199127)] -.. GENERATED FROM PYTHON SOURCE LINES 79-89 +.. GENERATED FROM PYTHON SOURCE LINES 76-86 Conversion into ONNX ++++++++++++++++++++ @@ -144,7 +137,7 @@ The following converter does not need to be registered, it just creates an ONNX graph equivalent to function *predict* implemented above. -.. GENERATED FROM PYTHON SOURCE LINES 89-115 +.. GENERATED FROM PYTHON SOURCE LINES 86-112 .. code-block:: default @@ -180,24 +173,14 @@ it just creates an ONNX graph equivalent to function .. rst-class:: sphx-glr-script-out - Out: - .. 
code-block:: none ir_version: 7 producer_name: "skl2onnx" - producer_version: "1.11.2" + producer_version: "1.14.0" domain: "ai.onnx" model_version: 0 graph { - node { - input: "Ar_ArrayFeatureExtractorcst1" - input: "row" - output: "Ar_Z02" - name: "Ar_ArrayFeatureExtractor1" - op_type: "ArrayFeatureExtractor" - domain: "ai.onnx.ml" - } node { input: "Ar_ArrayFeatureExtractorcst" input: "col" @@ -206,6 +189,14 @@ it just creates an ONNX graph equivalent to function op_type: "ArrayFeatureExtractor" domain: "ai.onnx.ml" } + node { + input: "Ar_ArrayFeatureExtractorcst1" + input: "row" + output: "Ar_Z02" + name: "Ar_ArrayFeatureExtractor1" + op_type: "ArrayFeatureExtractor" + domain: "ai.onnx.ml" + } node { input: "Ar_Z0" input: "Ar_Z02" @@ -226,30 +217,30 @@ it just creates an ONNX graph equivalent to function dims: 2 dims: 4 data_type: 1 - float_data: 1.9770314693450928 + float_data: 1.9596576690673828 + float_data: 0.3379417061805725 + float_data: 0.3379417061805725 float_data: 0.0 - float_data: 0.340936541557312 - float_data: 0.340936541557312 - float_data: 0.8948392271995544 - float_data: 0.868577241897583 + float_data: 0.884244441986084 float_data: 0.0 float_data: 0.0 + float_data: 0.8582920432090759 name: "Ar_ArrayFeatureExtractorcst" } initializer { dims: 2 dims: 5 data_type: 1 - float_data: 0.9011760950088501 + float_data: 0.9091630578041077 + float_data: 0.5646940469741821 + float_data: 0.5646940469741821 float_data: 0.0 - float_data: 0.5597310066223145 - float_data: 0.5597310066223145 - float_data: 0.45058804750442505 - float_data: 0.1256151795387268 - float_data: 1.1339133977890015 + float_data: 0.45458152890205383 + float_data: 0.12712430953979492 float_data: 0.0 float_data: 0.0 - float_data: 0.0628075897693634 + float_data: 1.1475005149841309 + float_data: 0.06356215476989746 name: "Ar_ArrayFeatureExtractorcst1" } input { @@ -305,11 +296,11 @@ it just creates an ONNX graph equivalent to function -.. GENERATED FROM PYTHON SOURCE LINES 116-117 +.. GENERATED FROM PYTHON SOURCE LINES 113-114 Let's compute prediction with it. -.. GENERATED FROM PYTHON SOURCE LINES 117-139 +.. GENERATED FROM PYTHON SOURCE LINES 114-136 .. code-block:: default @@ -341,20 +332,18 @@ Let's compute prediction with it. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [(0, 0, 1.894059), (0, 1, 0.10910649), (0, 2, 0.30724385), (0, 3, 0.30724385), (1, 0, 1.0146701), (1, 1, 0.98489136), (1, 2, 0.0), (1, 3, 0.0), (2, 0, 1.1066058), (2, 1, 0.0), (2, 2, 0.19083275), (2, 3, 0.19083275), (3, 0, 1.1066058), (3, 1, 0.0), (3, 2, 0.19083275), (3, 3, 0.19083275), (4, 0, 0.9470295), (4, 1, 0.054553244), (4, 2, 0.15362193), (4, 3, 0.15362193)] + [(0, 0, 1.8940574), (0, 1, 0.30724412), (0, 2, 0.30724412), (0, 3, 0.10910978), (1, 0, 1.106607), (1, 1, 0.19083367), (1, 2, 0.19083367), (1, 3, 0.0), (2, 0, 1.106607), (2, 1, 0.19083367), (2, 2, 0.19083367), (2, 3, 0.0), (3, 0, 1.014671), (3, 1, 0.0), (3, 2, 0.0), (3, 3, 0.9848906), (4, 0, 0.9470287), (4, 1, 0.15362206), (4, 2, 0.15362206), (4, 3, 0.05455489)] -.. GENERATED FROM PYTHON SOURCE LINES 140-141 +.. GENERATED FROM PYTHON SOURCE LINES 137-138 The ONNX graph looks like the following. -.. GENERATED FROM PYTHON SOURCE LINES 141-150 +.. GENERATED FROM PYTHON SOURCE LINES 138-147 .. code-block:: default @@ -378,20 +367,18 @@ The ONNX graph looks like the following. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 1303.5, 846.5, -0.5) + (-0.5, 1654.5, 846.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 151-152 +.. 
GENERATED FROM PYTHON SOURCE LINES 148-149 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 152-158 +.. GENERATED FROM PYTHON SOURCE LINES 149-155 .. code-block:: default @@ -407,15 +394,13 @@ The ONNX graph looks like the following. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -423,35 +408,23 @@ The ONNX graph looks like the following. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.452 seconds) + **Total running time of the script:** ( 0 minutes 0.368 seconds) .. _sphx_glr_download_auto_examples_plot_nmf.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_nmf.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_nmf.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_nmf.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_nmf.ipynb ` + :download:`Download Jupyter notebook: plot_nmf.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_onnx_operators.rst.txt b/_sources/auto_examples/plot_onnx_operators.rst.txt index d9307045a..8811c0e58 100644 --- a/_sources/auto_examples/plot_onnx_operators.rst.txt +++ b/_sources/auto_examples/plot_onnx_operators.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_onnx_operators.py" +.. "auto_examples/plot_onnx_operators.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -40,10 +40,6 @@ graph: `PythonAPIOverview.md But it is quite verbose and makes it difficult to describe big graphs. *sklearn-onnx* implements a nicer way to test *ONNX* operators. - -.. contents:: - :local: - ONNX Python API +++++++++++++++ @@ -55,7 +51,7 @@ It relies on *protobuf* whose definition can be found on github `onnx.proto `_. -.. GENERATED FROM PYTHON SOURCE LINES 42-84 +.. GENERATED FROM PYTHON SOURCE LINES 38-80 .. code-block:: default @@ -107,12 +103,10 @@ on github `onnx.proto .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none The model is: - ir_version: 8 + ir_version: 9 producer_name: "onnx-example" graph { node { @@ -179,7 +173,7 @@ on github `onnx.proto -.. GENERATED FROM PYTHON SOURCE LINES 85-91 +.. GENERATED FROM PYTHON SOURCE LINES 81-87 Same example with sklearn-onnx ++++++++++++++++++++++++++++++ @@ -188,7 +182,7 @@ Every operator has its own class in *sklearn-onnx*. The list is dynamically created based on the installed onnx package. -.. GENERATED FROM PYTHON SOURCE LINES 91-102 +.. GENERATED FROM PYTHON SOURCE LINES 87-98 .. 
code-block:: default @@ -209,14 +203,12 @@ onnx package. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none The model is: ir_version: 5 producer_name: "skl2onnx" - producer_version: "1.11.2" + producer_version: "1.14.0" domain: "ai.onnx" model_version: 0 graph { @@ -287,11 +279,11 @@ onnx package. -.. GENERATED FROM PYTHON SOURCE LINES 103-104 +.. GENERATED FROM PYTHON SOURCE LINES 99-100 Inputs and outputs can also be skipped. -.. GENERATED FROM PYTHON SOURCE LINES 104-111 +.. GENERATED FROM PYTHON SOURCE LINES 100-107 .. code-block:: default @@ -309,14 +301,14 @@ Inputs and outputs can also be skipped. -.. GENERATED FROM PYTHON SOURCE LINES 112-116 +.. GENERATED FROM PYTHON SOURCE LINES 108-112 Multiple operators ++++++++++++++++++ Let's use the second example from the documentation. -.. GENERATED FROM PYTHON SOURCE LINES 116-134 +.. GENERATED FROM PYTHON SOURCE LINES 112-130 .. code-block:: default @@ -345,11 +337,11 @@ Let's use the second example from the documentation. -.. GENERATED FROM PYTHON SOURCE LINES 135-136 +.. GENERATED FROM PYTHON SOURCE LINES 131-132 Which we translate into: -.. GENERATED FROM PYTHON SOURCE LINES 136-148 +.. GENERATED FROM PYTHON SOURCE LINES 132-144 .. code-block:: default @@ -372,11 +364,11 @@ Which we translate into: -.. GENERATED FROM PYTHON SOURCE LINES 149-150 +.. GENERATED FROM PYTHON SOURCE LINES 145-146 Let's the output with onnxruntime -.. GENERATED FROM PYTHON SOURCE LINES 150-165 +.. GENERATED FROM PYTHON SOURCE LINES 146-161 .. code-block:: default @@ -401,8 +393,6 @@ Let's the output with onnxruntime .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none {'Tr_transposed0': array([[[ 0., 1., 2., 3.], @@ -416,12 +406,12 @@ Let's the output with onnxruntime -.. GENERATED FROM PYTHON SOURCE LINES 166-168 +.. GENERATED FROM PYTHON SOURCE LINES 162-164 Display the ONNX graph ++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 168-182 +.. GENERATED FROM PYTHON SOURCE LINES 164-178 .. code-block:: default @@ -450,20 +440,18 @@ Display the ONNX graph .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 1266.5, 1707.5, -0.5) + (-0.5, 1524.5, 1707.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 183-184 +.. GENERATED FROM PYTHON SOURCE LINES 179-180 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 184-192 +.. GENERATED FROM PYTHON SOURCE LINES 180-188 .. code-block:: default @@ -481,15 +469,13 @@ Display the ONNX graph .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -497,35 +483,23 @@ Display the ONNX graph .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 1.262 seconds) + **Total running time of the script:** ( 0 minutes 1.010 seconds) .. _sphx_glr_download_auto_examples_plot_onnx_operators.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_onnx_operators.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_onnx_operators.py ` + .. 
container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_onnx_operators.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_onnx_operators.ipynb ` + :download:`Download Jupyter notebook: plot_onnx_operators.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_pipeline.rst.txt b/_sources/auto_examples/plot_pipeline.rst.txt index 9d916403a..5cb0b63a3 100644 --- a/_sources/auto_examples/plot_pipeline.rst.txt +++ b/_sources/auto_examples/plot_pipeline.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_pipeline.py" +.. "auto_examples/plot_pipeline.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -27,15 +27,12 @@ in ONNX format than looking into its node with how to draw a model and to retrieve it in *json* format. -.. contents:: - :local: - Retrieve a model in JSON format +++++++++++++++++++++++++++++++ That's the most simple way. -.. GENERATED FROM PYTHON SOURCE LINES 22-49 +.. GENERATED FROM PYTHON SOURCE LINES 19-46 .. code-block:: default @@ -72,13 +69,11 @@ That's the most simple way. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none ir_version: 7 producer_name: "skl2onnx" - producer_version: "1.11.2" + producer_version: "1.14.0" domain: "ai.onnx" model_version: 0 graph { @@ -156,7 +151,7 @@ That's the most simple way. -.. GENERATED FROM PYTHON SOURCE LINES 50-57 +.. GENERATED FROM PYTHON SOURCE LINES 47-54 Draw a model with ONNX ++++++++++++++++++++++ @@ -166,7 +161,7 @@ included in *onnx* package. We use *onnx* to load the model in a different way than before. -.. GENERATED FROM PYTHON SOURCE LINES 57-64 +.. GENERATED FROM PYTHON SOURCE LINES 54-61 .. code-block:: default @@ -184,11 +179,11 @@ in a different way than before. -.. GENERATED FROM PYTHON SOURCE LINES 65-66 +.. GENERATED FROM PYTHON SOURCE LINES 62-63 We convert it into a graph. -.. GENERATED FROM PYTHON SOURCE LINES 66-70 +.. GENERATED FROM PYTHON SOURCE LINES 63-67 .. code-block:: default @@ -203,11 +198,11 @@ We convert it into a graph. -.. GENERATED FROM PYTHON SOURCE LINES 71-72 +.. GENERATED FROM PYTHON SOURCE LINES 68-69 Then into an image -.. GENERATED FROM PYTHON SOURCE LINES 72-74 +.. GENERATED FROM PYTHON SOURCE LINES 69-71 .. code-block:: default @@ -219,8 +214,6 @@ Then into an image .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none @@ -228,11 +221,11 @@ Then into an image -.. GENERATED FROM PYTHON SOURCE LINES 75-76 +.. GENERATED FROM PYTHON SOURCE LINES 72-73 Which we display... -.. GENERATED FROM PYTHON SOURCE LINES 76-80 +.. GENERATED FROM PYTHON SOURCE LINES 73-77 .. code-block:: default @@ -251,20 +244,18 @@ Which we display... .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 389.5, 602.5, -0.5) + (-0.5, 431.5, 602.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 81-82 +.. GENERATED FROM PYTHON SOURCE LINES 78-79 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 82-88 +.. GENERATED FROM PYTHON SOURCE LINES 79-85 .. code-block:: default @@ -280,15 +271,13 @@ Which we display... .. 
rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -296,35 +285,23 @@ Which we display... .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.805 seconds) + **Total running time of the script:** ( 0 minutes 0.124 seconds) .. _sphx_glr_download_auto_examples_plot_pipeline.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_pipeline.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_pipeline.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_pipeline.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_pipeline.ipynb ` + :download:`Download Jupyter notebook: plot_pipeline.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_pipeline_lightgbm.rst.txt b/_sources/auto_examples/plot_pipeline_lightgbm.rst.txt index 7c17468cb..10d5db72e 100644 --- a/_sources/auto_examples/plot_pipeline_lightgbm.rst.txt +++ b/_sources/auto_examples/plot_pipeline_lightgbm.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_pipeline_lightgbm.py" +.. "auto_examples/plot_pipeline_lightgbm.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -32,13 +32,10 @@ a pipeline including a *LightGbm* model. *sklearn-onnx* can convert the whole pipeline as long as it knows the converter associated to a *LGBMClassifier*. Let's see how to do it. -.. contents:: - :local: - Train a LightGBM classifier +++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 25-59 +.. GENERATED FROM PYTHON SOURCE LINES 22-56 .. code-block:: default @@ -91,7 +88,7 @@ Train a LightGBM classifier
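The registration step shown below relies on ``skl2onnx.update_registered_converter``. A minimal, self-contained sketch of that pattern follows, assuming *onnxmltools* is installed; the exact import path for ``convert_lightgbm`` can vary between onnxmltools versions, and the small iris pipeline here only stands in for the one trained in the example.

.. code-block:: python

    import numpy
    from sklearn.datasets import load_iris
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from lightgbm import LGBMClassifier
    from onnxmltools.convert.lightgbm.operator_converters.LightGbm import (
        convert_lightgbm)
    from skl2onnx import convert_sklearn, update_registered_converter
    from skl2onnx.common.shape_calculator import (
        calculate_linear_classifier_output_shapes)
    from skl2onnx.common.data_types import FloatTensorType

    # Train a small pipeline ending with a LightGBM classifier
    # (a stand-in for the pipeline built in the example).
    X, y = load_iris(return_X_y=True)
    X = X.astype(numpy.float32)
    pipe = Pipeline([('scaler', StandardScaler()),
                     ('lgbm', LGBMClassifier(n_estimators=3))])
    pipe.fit(X, y)

    # Tell skl2onnx which shape calculator and converter handle LGBMClassifier.
    update_registered_converter(
        LGBMClassifier, 'LightGbmLGBMClassifier',
        calculate_linear_classifier_output_shapes, convert_lightgbm,
        options={'nocl': [True, False], 'zipmap': [True, False]})

    # Once registered, the whole pipeline converts like any scikit-learn model.
    model_onnx = convert_sklearn(
        pipe, 'pipeline_lightgbm',
        [('input', FloatTensorType([None, X.shape[1]]))],
        target_opset=12)

The XGBoost example further down follows the same pattern, registering ``XGBClassifier`` with the corresponding shape calculator and the ``convert_xgboost`` converter from *onnxmltools*.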

-.. GENERATED FROM PYTHON SOURCE LINES 60-71 +.. GENERATED FROM PYTHON SOURCE LINES 57-68 Register the converter for LGBMClassifier +++++++++++++++++++++++++++++++++++++++++ @@ -105,15 +102,15 @@ and the shape calculator: `_. -.. GENERATED FROM PYTHON SOURCE LINES 73-74 +.. GENERATED FROM PYTHON SOURCE LINES 70-71 Then we import the converter and shape calculator. -.. GENERATED FROM PYTHON SOURCE LINES 76-77 +.. GENERATED FROM PYTHON SOURCE LINES 73-74 Let's register the new converter. -.. GENERATED FROM PYTHON SOURCE LINES 77-82 +.. GENERATED FROM PYTHON SOURCE LINES 74-79 .. code-block:: default @@ -129,12 +126,12 @@ Let's register the new converter. -.. GENERATED FROM PYTHON SOURCE LINES 83-85 +.. GENERATED FROM PYTHON SOURCE LINES 80-82 Convert again +++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 85-95 +.. GENERATED FROM PYTHON SOURCE LINES 82-92 .. code-block:: default @@ -155,14 +152,14 @@ Convert again -.. GENERATED FROM PYTHON SOURCE LINES 96-100 +.. GENERATED FROM PYTHON SOURCE LINES 93-97 Compare the predictions +++++++++++++++++++++++ Predictions with LightGbm. -.. GENERATED FROM PYTHON SOURCE LINES 100-104 +.. GENERATED FROM PYTHON SOURCE LINES 97-101 .. code-block:: default @@ -176,21 +173,19 @@ Predictions with LightGbm. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [2 1 2 1 1] - predict_proba [[0.2371375 0.28904016 0.47382234]] + predict [0 2 1 2 1] + predict_proba [[0.51995794 0.24549283 0.23454923]] -.. GENERATED FROM PYTHON SOURCE LINES 105-106 +.. GENERATED FROM PYTHON SOURCE LINES 102-103 Predictions with onnxruntime. -.. GENERATED FROM PYTHON SOURCE LINES 106-119 +.. GENERATED FROM PYTHON SOURCE LINES 103-116 .. code-block:: default @@ -213,22 +208,20 @@ Predictions with onnxruntime. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [2 1 2 1 1] - predict_proba [{0: 0.23713748157024384, 1: 0.2890401780605316, 2: 0.47382235527038574}] + predict [0 2 1 2 1] + predict_proba [{0: 0.519957959651947, 1: 0.2454928159713745, 2: 0.23454922437667847}] -.. GENERATED FROM PYTHON SOURCE LINES 120-122 +.. GENERATED FROM PYTHON SOURCE LINES 117-119 Display the ONNX graph ++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 122-137 +.. GENERATED FROM PYTHON SOURCE LINES 119-134 .. code-block:: default @@ -258,20 +251,18 @@ Display the ONNX graph .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 2049.5, 2558.5, -0.5) + (-0.5, 2549.5, 2558.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 138-139 +.. GENERATED FROM PYTHON SOURCE LINES 135-136 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 139-147 +.. GENERATED FROM PYTHON SOURCE LINES 136-144 .. code-block:: default @@ -289,17 +280,15 @@ Display the ONNX graph .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 - onnxmltools: 1.11.1 - lightgbm: 3.3.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 + onnxmltools: 1.11.2 + lightgbm: 3.3.4 @@ -307,35 +296,23 @@ Display the ONNX graph .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 1.665 seconds) + **Total running time of the script:** ( 0 minutes 1.669 seconds) .. _sphx_glr_download_auto_examples_plot_pipeline_lightgbm.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. 
image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_pipeline_lightgbm.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_pipeline_lightgbm.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_pipeline_lightgbm.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_pipeline_lightgbm.ipynb ` + :download:`Download Jupyter notebook: plot_pipeline_lightgbm.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_pipeline_xgboost.rst.txt b/_sources/auto_examples/plot_pipeline_xgboost.rst.txt index 1eb8986ec..dc6e22567 100644 --- a/_sources/auto_examples/plot_pipeline_xgboost.rst.txt +++ b/_sources/auto_examples/plot_pipeline_xgboost.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_pipeline_xgboost.py" +.. "auto_examples/plot_pipeline_xgboost.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -32,13 +32,10 @@ a pipeline including a *XGBoost* model. *sklearn-onnx* can convert the whole pipeline as long as it knows the converter associated to a *XGBClassifier*. Let's see how to do it. -.. contents:: - :local: - Train a XGBoost classifier ++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 25-75 +.. GENERATED FROM PYTHON SOURCE LINES 22-72 .. code-block:: default @@ -98,13 +95,8 @@ Train a XGBoost classifier .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - D:\Program Files\Python\Python39\lib\site-packages\xgboost\sklearn.py:1224: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1]. - warnings.warn(label_encoder_deprecation_msg, UserWarning) - [14:41:59] WARNING: C:/Users/Administrator/workspace/xgboost-win64_release_1.5.1/src/learner.cc:1115: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'multi:softprob' was changed from 'merror' to 'mlogloss'. Explicitly set eval_metric if you'd like to restore the old behavior. Unable to find a shape calculator for type ''. It usually means the pipeline being converted contains a transformer or a predictor with no corresponding converter @@ -122,7 +114,7 @@ Train a XGBoost classifier -.. GENERATED FROM PYTHON SOURCE LINES 76-87 +.. GENERATED FROM PYTHON SOURCE LINES 73-84 Register the converter for XGBClassifier ++++++++++++++++++++++++++++++++++++++++ @@ -136,15 +128,15 @@ and the shape calculator: `_. -.. GENERATED FROM PYTHON SOURCE LINES 89-90 +.. GENERATED FROM PYTHON SOURCE LINES 86-87 Then we import the converter and shape calculator. -.. GENERATED FROM PYTHON SOURCE LINES 92-93 +.. 
GENERATED FROM PYTHON SOURCE LINES 89-90 Let's register the new converter. -.. GENERATED FROM PYTHON SOURCE LINES 93-98 +.. GENERATED FROM PYTHON SOURCE LINES 90-95 .. code-block:: default @@ -160,12 +152,12 @@ Let's register the new converter. -.. GENERATED FROM PYTHON SOURCE LINES 99-101 +.. GENERATED FROM PYTHON SOURCE LINES 96-98 Convert again +++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 101-111 +.. GENERATED FROM PYTHON SOURCE LINES 98-108 .. code-block:: default @@ -186,14 +178,14 @@ Convert again -.. GENERATED FROM PYTHON SOURCE LINES 112-116 +.. GENERATED FROM PYTHON SOURCE LINES 109-113 Compare the predictions +++++++++++++++++++++++ Predictions with XGBoost. -.. GENERATED FROM PYTHON SOURCE LINES 116-120 +.. GENERATED FROM PYTHON SOURCE LINES 113-117 .. code-block:: default @@ -207,21 +199,19 @@ Predictions with XGBoost. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [1 0 0 1 1] - predict_proba [[0.1758379 0.43438542 0.3897767 ]] + predict [0 1 2 2 1] + predict_proba [[0.69600695 0.1526681 0.15132491]] -.. GENERATED FROM PYTHON SOURCE LINES 121-122 +.. GENERATED FROM PYTHON SOURCE LINES 118-119 Predictions with onnxruntime. -.. GENERATED FROM PYTHON SOURCE LINES 122-128 +.. GENERATED FROM PYTHON SOURCE LINES 119-125 .. code-block:: default @@ -237,22 +227,20 @@ Predictions with onnxruntime. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [1 0 0 1 1] - predict_proba [{0: 0.175837904214859, 1: 0.43438541889190674, 2: 0.38977670669555664}] + predict [0 1 2 2 1] + predict_proba [{0: 0.6960069537162781, 1: 0.15266810357570648, 2: 0.15132491290569305}] -.. GENERATED FROM PYTHON SOURCE LINES 129-131 +.. GENERATED FROM PYTHON SOURCE LINES 126-128 Display the ONNX graph ++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 131-146 +.. GENERATED FROM PYTHON SOURCE LINES 128-143 .. code-block:: default @@ -282,20 +270,18 @@ Display the ONNX graph .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 1979.5, 2558.5, -0.5) + (-0.5, 2485.5, 2558.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 147-148 +.. GENERATED FROM PYTHON SOURCE LINES 144-145 **Versions used for this example** -.. GENERATED FROM PYTHON SOURCE LINES 148-156 +.. GENERATED FROM PYTHON SOURCE LINES 145-153 .. code-block:: default @@ -313,17 +299,15 @@ Display the ONNX graph .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 - onnxmltools: 1.11.1 - xgboost: 1.5.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 + onnxmltools: 1.11.2 + xgboost: 1.7.3 @@ -331,35 +315,23 @@ Display the ONNX graph .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 2.010 seconds) + **Total running time of the script:** ( 0 minutes 1.691 seconds) .. _sphx_glr_download_auto_examples_plot_pipeline_xgboost.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_pipeline_xgboost.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_pipeline_xgboost.py ` + .. 
container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_pipeline_xgboost.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_pipeline_xgboost.ipynb ` + :download:`Download Jupyter notebook: plot_pipeline_xgboost.ipynb ` .. only:: html diff --git a/_sources/auto_examples/plot_tfidfvectorizer.rst.txt b/_sources/auto_examples/plot_tfidfvectorizer.rst.txt index a7e95c270..f8c9e1b8d 100644 --- a/_sources/auto_examples/plot_tfidfvectorizer.rst.txt +++ b/_sources/auto_examples/plot_tfidfvectorizer.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_examples\plot_tfidfvectorizer.py" +.. "auto_examples/plot_tfidfvectorizer.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -29,9 +29,6 @@ This example is inspired from the following example: compose/plot_column_transformer.html>`_ which builds a pipeline to classify text. -.. contents:: - :local: - Train a pipeline with TfidfVectorizer +++++++++++++++++++++++++++++++++++++ @@ -39,7 +36,7 @@ It replicates the same pipeline taken from *scikit-learn* documentation but reduces it to the part ONNX actually supports without implementing a custom converter. Let's get the data. -.. GENERATED FROM PYTHON SOURCE LINES 26-64 +.. GENERATED FROM PYTHON SOURCE LINES 23-61 .. code-block:: default @@ -88,13 +85,13 @@ a custom converter. Let's get the data. -.. GENERATED FROM PYTHON SOURCE LINES 65-68 +.. GENERATED FROM PYTHON SOURCE LINES 62-65 The first transform extract two fields from the data. We take it out form the pipeline and assume the data is defined by two text columns. -.. GENERATED FROM PYTHON SOURCE LINES 68-103 +.. GENERATED FROM PYTHON SOURCE LINES 65-100 .. code-block:: default @@ -140,12 +137,12 @@ the data is defined by two text columns. -.. GENERATED FROM PYTHON SOURCE LINES 104-106 +.. GENERATED FROM PYTHON SOURCE LINES 101-103 The pipeline is almost the same except we remove the custom features. -.. GENERATED FROM PYTHON SOURCE LINES 106-140 +.. GENERATED FROM PYTHON SOURCE LINES 103-137 .. code-block:: default @@ -153,7 +150,7 @@ we remove the custom features. pipeline = Pipeline([ ('union', ColumnTransformer( [ - ('subject', TfidfVectorizer(min_df=50), 0), + ('subject', TfidfVectorizer(min_df=50, max_features=500), 0), ('body_bow', Pipeline([ ('tfidf', TfidfVectorizer()), @@ -189,8 +186,6 @@ we remove the custom features. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none precision recall f1-score support @@ -206,7 +201,7 @@ we remove the custom features. -.. GENERATED FROM PYTHON SOURCE LINES 141-150 +.. GENERATED FROM PYTHON SOURCE LINES 138-147 ONNX conversion +++++++++++++++ @@ -218,7 +213,7 @@ and is currently being implementing. The current implementation only considers a list of separators which can is defined in variable *seps*. -.. GENERATED FROM PYTHON SOURCE LINES 150-166 +.. GENERATED FROM PYTHON SOURCE LINES 147-163 .. code-block:: default @@ -242,23 +237,14 @@ in variable *seps*. -.. rst-class:: sphx-glr-script-out - Out: - - .. 
code-block:: none - D:\github\onnx\sklearn-onnx\skl2onnx\common\_container.py:695: UserWarning: Unable to find operator 'Tokenizer' in domain 'com.microsoft' in ONNX, op_version is forced to 1. - warnings.warn( - - - -.. GENERATED FROM PYTHON SOURCE LINES 167-168 +.. GENERATED FROM PYTHON SOURCE LINES 164-165 And save. -.. GENERATED FROM PYTHON SOURCE LINES 168-171 +.. GENERATED FROM PYTHON SOURCE LINES 165-168 .. code-block:: default @@ -272,11 +258,11 @@ And save. -.. GENERATED FROM PYTHON SOURCE LINES 172-173 +.. GENERATED FROM PYTHON SOURCE LINES 169-170 Predictions with onnxruntime. -.. GENERATED FROM PYTHON SOURCE LINES 173-181 +.. GENERATED FROM PYTHON SOURCE LINES 170-178 .. code-block:: default @@ -294,23 +280,21 @@ Predictions with onnxruntime. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none --- [" Re: Jews can't hide from keith@cco." 'Deletions...\n\nSo, you consider the german poster\'s remark anti-semitic? Perhaps you\nimply that anyone in Germany who doesn\'t agree with israely policy in a\nnazi? Pray tell, how does it even qualify as "casual anti-semitism"? \nIf the term doesn\'t apply, why then bring it up?\n\nYour own bigotry is shining through. \n-- '] predict [1] - predict_proba [{0: 0.43871212005615234, 1: 0.5612878799438477}] + predict_proba [{0: 0.4384377896785736, 1: 0.561562180519104}] -.. GENERATED FROM PYTHON SOURCE LINES 182-183 +.. GENERATED FROM PYTHON SOURCE LINES 179-180 With *scikit-learn*: -.. GENERATED FROM PYTHON SOURCE LINES 183-186 +.. GENERATED FROM PYTHON SOURCE LINES 180-183 .. code-block:: default @@ -323,30 +307,28 @@ With *scikit-learn*: .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [0] - [[0.71588446 0.28411554]] + [[0.71903792 0.28096208]] -.. GENERATED FROM PYTHON SOURCE LINES 187-190 +.. GENERATED FROM PYTHON SOURCE LINES 184-187 There are discrepencies for this model because the tokenization is not exactly the same. This is a work in progress. -.. GENERATED FROM PYTHON SOURCE LINES 192-196 +.. GENERATED FROM PYTHON SOURCE LINES 189-193 Display the ONNX graph ++++++++++++++++++++++ Finally, let's see the graph converted with *sklearn-onnx*. -.. GENERATED FROM PYTHON SOURCE LINES 196-211 +.. GENERATED FROM PYTHON SOURCE LINES 193-208 .. code-block:: default @@ -376,47 +358,33 @@ Finally, let's see the graph converted with *sklearn-onnx*. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 3939.5, 12237.5, -0.5) + (-0.5, 4939.5, 11475.5, -0.5) .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 8.337 seconds) + **Total running time of the script:** ( 0 minutes 10.979 seconds) .. _sphx_glr_download_auto_examples_plot_tfidfvectorizer.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_examples/plot_tfidfvectorizer.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_tfidfvectorizer.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_tfidfvectorizer.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. 
container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_tfidfvectorizer.ipynb ` + :download:`Download Jupyter notebook: plot_tfidfvectorizer.ipynb ` .. only:: html diff --git a/_sources/auto_examples/sg_execution_times.rst.txt b/_sources/auto_examples/sg_execution_times.rst.txt index 81c343170..ea812e89e 100644 --- a/_sources/auto_examples/sg_execution_times.rst.txt +++ b/_sources/auto_examples/sg_execution_times.rst.txt @@ -5,56 +5,56 @@ Computation times ================= -**01:03.411** total execution time for **auto_examples** files: +**01:36.683** total execution time for **auto_examples** files: +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_black_op.py` (``plot_black_op.py``) | 00:11.663 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_custom_model.py` (``plot_custom_model.py``) | 00:25.419 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_custom_model.py` (``plot_custom_model.py``) | 00:08.397 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_black_op.py` (``plot_black_op.py``) | 00:21.877 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_tfidfvectorizer.py` (``plot_tfidfvectorizer.py``) | 00:08.337 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_tfidfvectorizer.py` (``plot_tfidfvectorizer.py``) | 00:10.979 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_complex_pipeline.py` (``plot_complex_pipeline.py``) | 00:06.075 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_benchmark_pipeline.py` (``plot_benchmark_pipeline.py``) | 00:06.685 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_intermediate_outputs.py` (``plot_intermediate_outputs.py``) | 00:03.518 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_complex_pipeline.py` (``plot_complex_pipeline.py``) | 00:05.573 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_benchmark_pipeline.py` (``plot_benchmark_pipeline.py``) | 00:03.155 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_intermediate_outputs.py` (``plot_intermediate_outputs.py``) | 00:04.105 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_convert_syntax.py` (``plot_convert_syntax.py``) | 00:02.972 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_cast_transformer.py` (``plot_cast_transformer.py``) | 00:03.795 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_cast_transformer.py` (``plot_cast_transformer.py``) | 00:02.944 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_convert_syntax.py` (``plot_convert_syntax.py``) | 00:03.632 | 0.0 MB | 
+---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_benchmark_cdist.py` (``plot_benchmark_cdist.py``) | 00:02.854 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_custom_parser_alternative.py` (``plot_custom_parser_alternative.py``) | 00:02.812 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_custom_parser_alternative.py` (``plot_custom_parser_alternative.py``) | 00:02.296 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_custom_parser.py` (``plot_custom_parser.py``) | 00:02.623 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_custom_parser.py` (``plot_custom_parser.py``) | 00:02.021 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_benchmark_cdist.py` (``plot_benchmark_cdist.py``) | 00:01.989 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_pipeline_xgboost.py` (``plot_pipeline_xgboost.py``) | 00:02.010 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_pipeline_xgboost.py` (``plot_pipeline_xgboost.py``) | 00:01.691 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_pipeline_lightgbm.py` (``plot_pipeline_lightgbm.py``) | 00:01.665 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_pipeline_lightgbm.py` (``plot_pipeline_lightgbm.py``) | 00:01.669 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_gpr.py` (``plot_gpr.py``) | 00:01.528 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_gpr.py` (``plot_gpr.py``) | 00:01.592 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_onnx_operators.py` (``plot_onnx_operators.py``) | 00:01.262 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_onnx_operators.py` (``plot_onnx_operators.py``) | 00:01.010 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_pipeline.py` (``plot_pipeline.py``) | 00:00.805 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_nmf.py` (``plot_nmf.py``) | 00:00.368 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_nmf.py` (``plot_nmf.py``) | 00:00.452 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_investigate_pipeline.py` (``plot_investigate_pipeline.py``) | 00:00.274 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_investigate_pipeline.py` (``plot_investigate_pipeline.py``) | 00:00.402 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_convert_model.py` (``plot_convert_model.py``) | 00:00.182 | 0.0 MB | 
+---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_backend.py` (``plot_backend.py``) | 00:00.285 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_convert_zipmap.py` (``plot_convert_zipmap.py``) | 00:00.153 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_convert_model.py` (``plot_convert_model.py``) | 00:00.244 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_pipeline.py` (``plot_pipeline.py``) | 00:00.124 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_convert_zipmap.py` (``plot_convert_zipmap.py``) | 00:00.195 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_logging.py` (``plot_logging.py``) | 00:00.092 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_logging.py` (``plot_logging.py``) | 00:00.114 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_convert_decision_function.py` (``plot_convert_decision_function.py``) | 00:00.042 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_metadata.py` (``plot_metadata.py``) | 00:00.082 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_backend.py` (``plot_backend.py``) | 00:00.000 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_errors_onnxruntime.py` (``plot_errors_onnxruntime.py``) | 00:00.067 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_errors_onnxruntime.py` (``plot_errors_onnxruntime.py``) | 00:00.000 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_plot_convert_decision_function.py` (``plot_convert_decision_function.py``) | 00:00.067 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_plot_metadata.py` (``plot_metadata.py``) | 00:00.000 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ diff --git a/_sources/auto_tutorial/index.rst.txt b/_sources/auto_tutorial/index.rst.txt index 39be01a5e..8bc2f9f54 100644 --- a/_sources/auto_tutorial/index.rst.txt +++ b/_sources/auto_tutorial/index.rst.txt @@ -1,12 +1,13 @@ :orphan: +Examples +======== -.. _sphx_glr_auto_tutorial: -Examples -======== +.. raw:: html +
.. raw:: html @@ -15,20 +16,33 @@ Examples .. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_qextend_onnxruntime_thumb.png - :alt: Fast runtime with onnxruntime + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_qextend_onnxruntime_thumb.png + :alt: Fast runtime with onnxruntime - :ref:`sphx_glr_auto_tutorial_plot_qextend_onnxruntime.py` + :ref:`sphx_glr_auto_tutorial_plot_qextend_onnxruntime.py` .. raw:: html +
Fast runtime with onnxruntime
-.. toctree:: - :hidden: +.. raw:: html + +
+ +.. only:: html + + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_ngrams_thumb.png + :alt: Tricky issue when converting CountVectorizer or TfidfVectorizer + + :ref:`sphx_glr_auto_tutorial_plot_ngrams.py` + +.. raw:: html + +
Tricky issue when converting CountVectorizer or TfidfVectorizer
+
- /auto_tutorial/plot_qextend_onnxruntime .. raw:: html @@ -36,167 +50,152 @@ Examples .. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_abegin_convert_pipeline_thumb.png - :alt: Train and deploy a scikit-learn pipeline + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_abegin_convert_pipeline_thumb.png + :alt: Train and deploy a scikit-learn pipeline - :ref:`sphx_glr_auto_tutorial_plot_abegin_convert_pipeline.py` + :ref:`sphx_glr_auto_tutorial_plot_abegin_convert_pipeline.py` .. raw:: html +
Train and deploy a scikit-learn pipeline
-.. toctree:: - :hidden: - - /auto_tutorial/plot_abegin_convert_pipeline - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_gbegin_cst_thumb.png - :alt: Store arrays in one onnx graph + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_gbegin_cst_thumb.png + :alt: Store arrays in one onnx graph - :ref:`sphx_glr_auto_tutorial_plot_gbegin_cst.py` + :ref:`sphx_glr_auto_tutorial_plot_gbegin_cst.py` .. raw:: html +
Store arrays in one onnx graph
-.. toctree:: - :hidden: - - /auto_tutorial/plot_gbegin_cst - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_gexternal_lightgbm_thumb.png - :alt: Convert a pipeline with a LightGBM classifier + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_gexternal_lightgbm_thumb.png + :alt: Convert a pipeline with a LightGBM classifier - :ref:`sphx_glr_auto_tutorial_plot_gexternal_lightgbm.py` + :ref:`sphx_glr_auto_tutorial_plot_gexternal_lightgbm.py` .. raw:: html +
Convert a pipeline with a LightGBM classifier
-.. toctree:: - :hidden: - - /auto_tutorial/plot_gexternal_lightgbm - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_cbegin_opset_thumb.png - :alt: What is the opset number? + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_cbegin_opset_thumb.png + :alt: What is the opset number? - :ref:`sphx_glr_auto_tutorial_plot_cbegin_opset.py` + :ref:`sphx_glr_auto_tutorial_plot_cbegin_opset.py` .. raw:: html +
What is the opset number?
-.. toctree:: - :hidden: - - /auto_tutorial/plot_cbegin_opset - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_dbegin_options_list_thumb.png - :alt: Black list operators when converting + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_dbegin_options_list_thumb.png + :alt: Black list operators when converting - :ref:`sphx_glr_auto_tutorial_plot_dbegin_options_list.py` + :ref:`sphx_glr_auto_tutorial_plot_dbegin_options_list.py` .. raw:: html +
Black list operators when converting
-.. toctree:: - :hidden: - - /auto_tutorial/plot_dbegin_options_list - .. raw:: html -
+
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_fbegin_investigate_thumb.png - :alt: Intermediate results and investigation + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_transformer_discrepancy_thumb.png + :alt: Dealing with discrepancies (tf-idf) - :ref:`sphx_glr_auto_tutorial_plot_fbegin_investigate.py` + :ref:`sphx_glr_auto_tutorial_plot_transformer_discrepancy.py` .. raw:: html +
Dealing with discrepancies (tf-idf)
-.. toctree:: - :hidden: - - /auto_tutorial/plot_fbegin_investigate - .. raw:: html -
+
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_gconverting_thumb.png - :alt: Modify the ONNX graph + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_fbegin_investigate_thumb.png + :alt: Intermediate results and investigation - :ref:`sphx_glr_auto_tutorial_plot_gconverting.py` + :ref:`sphx_glr_auto_tutorial_plot_fbegin_investigate.py` .. raw:: html +
Intermediate results and investigation
-.. toctree:: - :hidden: - - /auto_tutorial/plot_gconverting - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_woe_transformer_thumb.png - :alt: Converter for WOE + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_woe_transformer_thumb.png + :alt: Converter for WOE - :ref:`sphx_glr_auto_tutorial_plot_woe_transformer.py` + :ref:`sphx_glr_auto_tutorial_plot_woe_transformer.py` .. raw:: html +
Converter for WOE
-.. toctree:: - :hidden: +.. raw:: html + +
+ +.. only:: html + + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_gconverting_thumb.png + :alt: Modify the ONNX graph + + :ref:`sphx_glr_auto_tutorial_plot_gconverting.py` + +.. raw:: html + +
Modify the ONNX graph
+
- /auto_tutorial/plot_woe_transformer .. raw:: html @@ -204,83 +203,84 @@ Examples .. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_bbegin_measure_time_thumb.png - :alt: Benchmark ONNX conversion + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_bbegin_measure_time_thumb.png + :alt: Benchmark ONNX conversion - :ref:`sphx_glr_auto_tutorial_plot_bbegin_measure_time.py` + :ref:`sphx_glr_auto_tutorial_plot_bbegin_measure_time.py` .. raw:: html +
Benchmark ONNX conversion
-.. toctree:: - :hidden: - - /auto_tutorial/plot_bbegin_measure_time - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_gbegin_dataframe_thumb.png - :alt: Dataframe as an input + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_gbegin_dataframe_thumb.png + :alt: Dataframe as an input - :ref:`sphx_glr_auto_tutorial_plot_gbegin_dataframe.py` + :ref:`sphx_glr_auto_tutorial_plot_gbegin_dataframe.py` .. raw:: html +
Dataframe as an input
-.. toctree:: - :hidden: - - /auto_tutorial/plot_gbegin_dataframe - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_gexternal_lightgbm_reg_thumb.png - :alt: Convert a pipeline with a LightGBM regressor + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_gexternal_lightgbm_reg_thumb.png + :alt: Convert a pipeline with a LightGBM regressor - :ref:`sphx_glr_auto_tutorial_plot_gexternal_lightgbm_reg.py` + :ref:`sphx_glr_auto_tutorial_plot_gexternal_lightgbm_reg.py` .. raw:: html +
Convert a pipeline with a LightGBM regressor
-.. toctree:: - :hidden: - - /auto_tutorial/plot_gexternal_lightgbm_reg - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_kcustom_converter_wrapper_thumb.png - :alt: Implement a new converter using other converters + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_kcustom_converter_wrapper_thumb.png + :alt: Implement a new converter using other converters - :ref:`sphx_glr_auto_tutorial_plot_kcustom_converter_wrapper.py` + :ref:`sphx_glr_auto_tutorial_plot_kcustom_converter_wrapper.py` .. raw:: html +
Implement a new converter using other converters
-.. toctree:: - :hidden: +.. raw:: html + +
+ +.. only:: html + + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_ebegin_float_double_thumb.png + :alt: Issues when switching to float + + :ref:`sphx_glr_auto_tutorial_plot_ebegin_float_double.py` + +.. raw:: html + +
Issues when switching to float
+
- /auto_tutorial/plot_kcustom_converter_wrapper .. raw:: html @@ -288,293 +288,271 @@ Examples .. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_dbegin_options_zipmap_thumb.png - :alt: Choose appropriate output of a classifier + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_dbegin_options_zipmap_thumb.png + :alt: Choose appropriate output of a classifier - :ref:`sphx_glr_auto_tutorial_plot_dbegin_options_zipmap.py` + :ref:`sphx_glr_auto_tutorial_plot_dbegin_options_zipmap.py` .. raw:: html +
Choose appropriate output of a classifier
-.. toctree:: - :hidden: - - /auto_tutorial/plot_dbegin_options_zipmap - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_dbegin_options_thumb.png - :alt: One model, many possible conversions with options + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_dbegin_options_thumb.png + :alt: One model, many possible conversions with options - :ref:`sphx_glr_auto_tutorial_plot_dbegin_options.py` + :ref:`sphx_glr_auto_tutorial_plot_dbegin_options.py` .. raw:: html +
One model, many possible conversions with options
-.. toctree:: - :hidden: - - /auto_tutorial/plot_dbegin_options - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_mcustom_parser_thumb.png - :alt: Change the number of outputs by adding a parser + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_mcustom_parser_thumb.png + :alt: Change the number of outputs by adding a parser - :ref:`sphx_glr_auto_tutorial_plot_mcustom_parser.py` + :ref:`sphx_glr_auto_tutorial_plot_mcustom_parser.py` .. raw:: html +
Change the number of outputs by adding a parser
-.. toctree:: - :hidden: - - /auto_tutorial/plot_mcustom_parser - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_jcustom_syntax_thumb.png - :alt: Two ways to implement a converter + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_jcustom_syntax_thumb.png + :alt: Two ways to implement a converter - :ref:`sphx_glr_auto_tutorial_plot_jcustom_syntax.py` + :ref:`sphx_glr_auto_tutorial_plot_jcustom_syntax.py` .. raw:: html +
Two ways to implement a converter
-.. toctree:: - :hidden: - - /auto_tutorial/plot_jcustom_syntax - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_lcustom_options_thumb.png - :alt: A new converter with options + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_lcustom_options_thumb.png + :alt: A new converter with options - :ref:`sphx_glr_auto_tutorial_plot_lcustom_options.py` + :ref:`sphx_glr_auto_tutorial_plot_lcustom_options.py` .. raw:: html +
A new converter with options
-.. toctree:: - :hidden: - - /auto_tutorial/plot_lcustom_options - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_gexternal_xgboost_thumb.png - :alt: Convert a pipeline with a XGBoost model + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_gexternal_xgboost_thumb.png + :alt: Convert a pipeline with a XGBoost model - :ref:`sphx_glr_auto_tutorial_plot_gexternal_xgboost.py` + :ref:`sphx_glr_auto_tutorial_plot_gexternal_xgboost.py` .. raw:: html +
Convert a pipeline with a XGBoost model
-.. toctree:: - :hidden: - - /auto_tutorial/plot_gexternal_xgboost - .. raw:: html -
+
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_ebegin_float_double_thumb.png - :alt: Issues when switching to float + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_gexternal_catboost_thumb.png + :alt: Convert a pipeline with a CatBoost classifier - :ref:`sphx_glr_auto_tutorial_plot_ebegin_float_double.py` + :ref:`sphx_glr_auto_tutorial_plot_gexternal_catboost.py` .. raw:: html +
Convert a pipeline with a CatBoost classifier
-.. toctree:: - :hidden: - - /auto_tutorial/plot_ebegin_float_double - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_gbegin_transfer_learning_thumb.png - :alt: Transfer Learning with ONNX + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_gbegin_transfer_learning_thumb.png + :alt: Transfer Learning with ONNX - :ref:`sphx_glr_auto_tutorial_plot_gbegin_transfer_learning.py` + :ref:`sphx_glr_auto_tutorial_plot_gbegin_transfer_learning.py` .. raw:: html +
Transfer Learning with ONNX
-.. toctree:: - :hidden: - - /auto_tutorial/plot_gbegin_transfer_learning - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_icustom_converter_thumb.png - :alt: Implement a new converter + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_icustom_converter_thumb.png + :alt: Implement a new converter - :ref:`sphx_glr_auto_tutorial_plot_icustom_converter.py` + :ref:`sphx_glr_auto_tutorial_plot_icustom_converter.py` .. raw:: html +
Implement a new converter
-.. toctree:: - :hidden: - - /auto_tutorial/plot_icustom_converter - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_wext_pyod_forest_thumb.png - :alt: Converter for pyod.models.iforest.IForest + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_wext_pyod_forest_thumb.png + :alt: Converter for pyod.models.iforest.IForest - :ref:`sphx_glr_auto_tutorial_plot_wext_pyod_forest.py` + :ref:`sphx_glr_auto_tutorial_plot_wext_pyod_forest.py` .. raw:: html +
Converter for pyod.models.iforest.IForest
-.. toctree:: - :hidden: - - /auto_tutorial/plot_wext_pyod_forest - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_catwoe_transformer_thumb.png - :alt: Converter for WOEEncoder from categorical_encoder + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_catwoe_transformer_thumb.png + :alt: Converter for WOEEncoder from categorical_encoder - :ref:`sphx_glr_auto_tutorial_plot_catwoe_transformer.py` + :ref:`sphx_glr_auto_tutorial_plot_catwoe_transformer.py` .. raw:: html +
Converter for WOEEncoder from categorical_encoder
-.. toctree:: - :hidden: - - /auto_tutorial/plot_catwoe_transformer - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_usparse_xgboost_thumb.png - :alt: TfIdf and sparse matrices + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_usparse_xgboost_thumb.png + :alt: TfIdf and sparse matrices - :ref:`sphx_glr_auto_tutorial_plot_usparse_xgboost.py` + :ref:`sphx_glr_auto_tutorial_plot_usparse_xgboost.py` .. raw:: html +
TfIdf and sparse matrices
-.. toctree:: - :hidden: - - /auto_tutorial/plot_usparse_xgboost - .. raw:: html
.. only:: html - .. figure:: /auto_tutorial/images/thumb/sphx_glr_plot_pextend_python_runtime_thumb.png - :alt: Fast design with a python runtime + .. image:: /auto_tutorial/images/thumb/sphx_glr_plot_pextend_python_runtime_thumb.png + :alt: Fast design with a python runtime - :ref:`sphx_glr_auto_tutorial_plot_pextend_python_runtime.py` + :ref:`sphx_glr_auto_tutorial_plot_pextend_python_runtime.py` .. raw:: html +
Fast design with a python runtime
-.. toctree:: - :hidden: - - /auto_tutorial/plot_pextend_python_runtime .. raw:: html -
- +
-.. only :: html +.. toctree:: + :hidden: - .. container:: sphx-glr-footer - :class: sphx-glr-footer-gallery + /auto_tutorial/plot_qextend_onnxruntime + /auto_tutorial/plot_ngrams + /auto_tutorial/plot_abegin_convert_pipeline + /auto_tutorial/plot_gbegin_cst + /auto_tutorial/plot_gexternal_lightgbm + /auto_tutorial/plot_cbegin_opset + /auto_tutorial/plot_dbegin_options_list + /auto_tutorial/plot_transformer_discrepancy + /auto_tutorial/plot_fbegin_investigate + /auto_tutorial/plot_woe_transformer + /auto_tutorial/plot_gconverting + /auto_tutorial/plot_bbegin_measure_time + /auto_tutorial/plot_gbegin_dataframe + /auto_tutorial/plot_gexternal_lightgbm_reg + /auto_tutorial/plot_kcustom_converter_wrapper + /auto_tutorial/plot_ebegin_float_double + /auto_tutorial/plot_dbegin_options_zipmap + /auto_tutorial/plot_dbegin_options + /auto_tutorial/plot_mcustom_parser + /auto_tutorial/plot_jcustom_syntax + /auto_tutorial/plot_lcustom_options + /auto_tutorial/plot_gexternal_xgboost + /auto_tutorial/plot_gexternal_catboost + /auto_tutorial/plot_gbegin_transfer_learning + /auto_tutorial/plot_icustom_converter + /auto_tutorial/plot_wext_pyod_forest + /auto_tutorial/plot_catwoe_transformer + /auto_tutorial/plot_usparse_xgboost + /auto_tutorial/plot_pextend_python_runtime - .. container:: sphx-glr-download sphx-glr-download-python +.. only:: html - :download:`Download all examples in Python source code: auto_tutorial_python.zip ` + .. container:: sphx-glr-footer sphx-glr-footer-gallery + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download all examples in Python source code: auto_tutorial_python.zip ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download all examples in Jupyter notebooks: auto_tutorial_jupyter.zip ` + :download:`Download all examples in Jupyter notebooks: auto_tutorial_jupyter.zip ` .. only:: html diff --git a/_sources/auto_tutorial/plot_abegin_convert_pipeline.rst.txt b/_sources/auto_tutorial/plot_abegin_convert_pipeline.rst.txt index f5ad79878..7cadb0465 100644 --- a/_sources/auto_tutorial/plot_abegin_convert_pipeline.rst.txt +++ b/_sources/auto_tutorial/plot_abegin_convert_pipeline.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_abegin_convert_pipeline.py" +.. "auto_tutorial/plot_abegin_convert_pipeline.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -31,14 +31,10 @@ documentation: `Plot individual and voting regression predictions converts it into ONNX and finally computes the predictions a different runtime. -.. contents:: - :local: - - Training a pipeline +++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 24-51 +.. GENERATED FROM PYTHON SOURCE LINES 20-47 .. code-block:: default @@ -77,32 +73,32 @@ Training a pipeline .. raw:: html
-
Pipeline(steps=[('voting',
+    
Pipeline(steps=[('voting',
                      VotingRegressor(estimators=[('gb',
                                                   GradientBoostingRegressor(n_estimators=5,
                                                                             random_state=1)),
                                                  ('rf',
                                                   RandomForestRegressor(n_estimators=5,
                                                                         random_state=1)),
-                                                 ('lr', LinearRegression())]))])
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
GradientBoostingRegressor(n_estimators=5, random_state=1)
RandomForestRegressor(n_estimators=5, random_state=1)
LinearRegression()


-.. GENERATED FROM PYTHON SOURCE LINES 52-60 +.. GENERATED FROM PYTHON SOURCE LINES 48-56 Converts the model ++++++++++++++++++ @@ -113,7 +109,7 @@ the input type of the ONNX graph. It is converted into single float and ONNX runtimes may not fully support doubles. -.. GENERATED FROM PYTHON SOURCE LINES 60-64 +.. GENERATED FROM PYTHON SOURCE LINES 56-60 .. code-block:: default @@ -128,14 +124,14 @@ support doubles. -.. GENERATED FROM PYTHON SOURCE LINES 65-69 +.. GENERATED FROM PYTHON SOURCE LINES 61-65 Prediction with ONNX ++++++++++++++++++++ The first example uses :epkg:`onnxruntime`. -.. GENERATED FROM PYTHON SOURCE LINES 69-78 +.. GENERATED FROM PYTHON SOURCE LINES 65-74 .. code-block:: default @@ -154,23 +150,21 @@ The first example uses :epkg:`onnxruntime`. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Onnx Runtime prediction: - [[169.97092] - [165.00122] - [161.92123] - [129.07263] - [177.11513]] + [[214.14449 ] + [107.54984 ] + [ 83.3338 ] + [168.55504 ] + [ 85.816086]] Sklearn rediction: - [169.97090438 165.00121185 161.92123427 129.07263128 177.11512633] + [214.14448115 107.54983764 83.33379987 168.55502653 85.81608297] -.. GENERATED FROM PYTHON SOURCE LINES 79-86 +.. GENERATED FROM PYTHON SOURCE LINES 75-82 .. _l-diff-dicrepencies: @@ -180,7 +174,7 @@ Comparison Before deploying, we need to compare that both *scikit-learn* and *ONNX* return the same predictions. -.. GENERATED FROM PYTHON SOURCE LINES 86-97 +.. GENERATED FROM PYTHON SOURCE LINES 82-93 .. code-block:: default @@ -201,16 +195,14 @@ Before deploying, we need to compare that both .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (2.888406933720944e-05, 1.3576994417322481e-07) + (2.225058631211141e-05, 1.2777930441858178e-07) -.. GENERATED FROM PYTHON SOURCE LINES 98-104 +.. GENERATED FROM PYTHON SOURCE LINES 94-100 It looks good. Biggest errors (absolute and relative) are within the margin error introduced by using @@ -219,7 +211,7 @@ We can save the model into ONNX format and compute the same predictions in many platform using :epkg:`onnxruntime`. -.. GENERATED FROM PYTHON SOURCE LINES 106-114 +.. GENERATED FROM PYTHON SOURCE LINES 102-110 Python runtime ++++++++++++++ @@ -230,7 +222,7 @@ production (it still relies on python), but it is useful to investigate why the conversion went wrong. It uses module :epkg:`mlprodict`. -.. GENERATED FROM PYTHON SOURCE LINES 114-118 +.. GENERATED FROM PYTHON SOURCE LINES 110-114 .. code-block:: default @@ -244,26 +236,24 @@ It uses module :epkg:`mlprodict`. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none OnnxInference(...) 
- def compiled_run(dict_inputs, yield_ops=None, context=None): + def compiled_run(dict_inputs, yield_ops=None, context=None, attributes=None): if yield_ops is not None: raise NotImplementedError('yields_ops should be None.') # init: w0 (w0) # inputs X = dict_inputs['X'] - (var_2, ) = n0_linearregressor(X) - (var_0, ) = n1_treeensembleregressor_1(X) - (var_1, ) = n2_treeensembleregressor_1(X) - (wvar_2, ) = n3_mul(var_2, w0) - (wvar_1, ) = n4_mul(var_1, w0) - (wvar_0, ) = n5_mul(var_0, w0) - (fvar_2, ) = n6_flatten(wvar_2) + (var_0, ) = n0_treeensembleregressor_1(X) + (var_1, ) = n1_treeensembleregressor_1(X) + (var_2, ) = n2_linearregressor(X) + (wvar_1, ) = n3_mul(var_1, w0) + (wvar_0, ) = n4_mul(var_0, w0) + (wvar_2, ) = n5_mul(var_2, w0) + (fvar_0, ) = n6_flatten(wvar_0) (fvar_1, ) = n7_flatten(wvar_1) - (fvar_0, ) = n8_flatten(wvar_0) + (fvar_2, ) = n8_flatten(wvar_2) (variable, ) = n9_sum(fvar_0, fvar_1, fvar_2) return { 'variable': variable, @@ -272,11 +262,11 @@ It uses module :epkg:`mlprodict`. -.. GENERATED FROM PYTHON SOURCE LINES 119-120 +.. GENERATED FROM PYTHON SOURCE LINES 115-116 It works almost the same way. -.. GENERATED FROM PYTHON SOURCE LINES 120-124 +.. GENERATED FROM PYTHON SOURCE LINES 116-120 .. code-block:: default @@ -290,22 +280,20 @@ It works almost the same way. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (2.888406933720944e-05, 1.3576994417322481e-07) + (2.225058631211141e-05, 1.2777930441858178e-07) -.. GENERATED FROM PYTHON SOURCE LINES 125-128 +.. GENERATED FROM PYTHON SOURCE LINES 121-124 Final graph You may need to install graphviz from https://graphviz.org/download/ +++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 128-132 +.. GENERATED FROM PYTHON SOURCE LINES 124-128 .. code-block:: default @@ -328,35 +316,23 @@ You may need to install graphviz from https://graphviz.org/download/ .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 1.391 seconds) + **Total running time of the script:** ( 0 minutes 1.079 seconds) .. _sphx_glr_download_auto_tutorial_plot_abegin_convert_pipeline.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_abegin_convert_pipeline.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_abegin_convert_pipeline.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_abegin_convert_pipeline.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_abegin_convert_pipeline.ipynb ` + :download:`Download Jupyter notebook: plot_abegin_convert_pipeline.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_bbegin_measure_time.rst.txt b/_sources/auto_tutorial/plot_bbegin_measure_time.rst.txt index 5ca338d14..267a9469c 100644 --- a/_sources/auto_tutorial/plot_bbegin_measure_time.rst.txt +++ b/_sources/auto_tutorial/plot_bbegin_measure_time.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. 
"auto_tutorial\plot_bbegin_measure_time.py" +.. "auto_tutorial/plot_bbegin_measure_time.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -28,14 +28,10 @@ This example takes a similar example but on random data and compares the processing time required by each option to compute predictions. -.. contents:: - :local: - - Training a pipeline +++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 21-50 +.. GENERATED FROM PYTHON SOURCE LINES 17-46 .. code-block:: default @@ -74,8 +70,6 @@ Training a pipeline .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Train shape (110, 10) @@ -85,16 +79,16 @@ Training a pipeline .. raw:: html
    VotingRegressor(estimators=[('gb', GradientBoostingRegressor(random_state=1)),
                                ('rf', RandomForestRegressor(random_state=1)),
                                ('lr', LinearRegression())])
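
The fitted pipeline shown above is the model that the rest of this example
converts to ONNX and benchmarks. As a side note, a minimal sketch of that
conversion with :epkg:`skl2onnx` (hypothetical: it assumes the fitted
``model`` and the ``X_train`` array from the training code above, and the
``target_opset`` value is an arbitrary choice; the call actually used by the
example appears further down) could look like this:

.. code-block:: default

    # Hypothetical sketch: convert the fitted VotingRegressor to ONNX.
    # `model` and `X_train` are assumed to come from the training code above.
    import numpy
    from skl2onnx import to_onnx

    onx = to_onnx(model, X_train[:1].astype(numpy.float32), target_opset=15)
    with open("pipeline_vote.onnx", "wb") as f:
        f.write(onx.SerializeToString())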


-.. GENERATED FROM PYTHON SOURCE LINES 51-60 +.. GENERATED FROM PYTHON SOURCE LINES 47-56 Measure the processing time +++++++++++++++++++++++++++ @@ -106,7 +100,7 @@ may be useful if you need to optimize the prediction. We measure the processing time per observation whether or not an observation belongs to a batch or is a single one. -.. GENERATED FROM PYTHON SOURCE LINES 60-77 +.. GENERATED FROM PYTHON SOURCE LINES 56-73 .. code-block:: default @@ -133,11 +127,9 @@ or not an observation belongs to a batch or is a single one. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - 0%| | 0/4 [00:00 0 - 0.004473 - 0.000534 - 0.004291 - 0.008135 + 0.007118 + 0.001354 + 0.005786 + 0.012553 50 10 1 - 0.004473 + 0.007118 1 - 0.004460 - 0.000343 - 0.004332 - 0.006835 + 0.006686 + 0.001005 + 0.005542 + 0.010000 50 10 10 - 0.000446 + 0.000669 2 - 0.017720 - 0.001314 - 0.016942 - 0.021182 + 0.037563 + 0.003221 + 0.033256 + 0.045436 10 10 1000 - 0.000018 + 0.000038 3 - 0.097764 - 0.003830 - 0.090896 - 0.102486 + 0.162250 + 0.008343 + 0.153190 + 0.177519 5 10 10000 - 0.000010 + 0.000016 @@ -223,11 +215,11 @@ or not an observation belongs to a batch or is a single one.
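
The table above was produced by the example's own timing helper. A minimal,
self-contained sketch of the same idea (hypothetical names: any fitted
estimator ``model`` and a numpy array ``X`` are assumed, and ``timeit`` is
used instead of the helper) could be:

.. code-block:: default

    # Hypothetical sketch: average prediction time per observation
    # for several batch sizes.
    from timeit import timeit

    def per_observation_time(model, X, batch_sizes=(1, 10, 1000), repeat=10):
        times = {}
        for size in batch_sizes:
            batch = X[:size]
            total = timeit(lambda: model.predict(batch), number=repeat)
            times[size] = total / (repeat * size)  # seconds per observation
        return times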

-.. GENERATED FROM PYTHON SOURCE LINES 78-79 +.. GENERATED FROM PYTHON SOURCE LINES 74-75 Graphe. -.. GENERATED FROM PYTHON SOURCE LINES 79-83 +.. GENERATED FROM PYTHON SOURCE LINES 75-79 .. code-block:: default @@ -247,7 +239,7 @@ Graphe. -.. GENERATED FROM PYTHON SOURCE LINES 84-89 +.. GENERATED FROM PYTHON SOURCE LINES 80-85 ONNX runtime ++++++++++++ @@ -255,7 +247,7 @@ ONNX runtime The same is done with the two ONNX runtime available. -.. GENERATED FROM PYTHON SOURCE LINES 89-127 +.. GENERATED FROM PYTHON SOURCE LINES 85-123 .. code-block:: default @@ -303,11 +295,9 @@ available. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - 0%| | 0/4 [00:00 0 - 0.004481 - 0.000166 - 0.004401 - 0.005481 + 0.007177 + 0.000710 + 0.005820 + 0.010053 50 10 1 - 0.004481 - 0.000079 - 0.000067 + 0.007177 + 0.000026 + 0.000077 1 - 0.005204 - 0.000348 - 0.005031 - 0.007292 + 0.006645 + 0.000843 + 0.005670 + 0.008772 50 10 10 - 0.000520 - 0.000012 - 0.000014 + 0.000664 + 0.000010 + 0.000010 2 - 0.017302 - 0.000573 - 0.016954 - 0.018913 + 0.034182 + 0.001519 + 0.032991 + 0.038628 10 10 1000 - 0.000017 - 0.000001 - 0.000002 + 0.000034 + 0.000004 + 0.000080 3 - 0.095836 - 0.009330 - 0.084142 - 0.111543 + 0.155520 + 0.003090 + 0.151416 + 0.160624 5 10 10000 - 0.000010 - 0.000001 - 0.000002 + 0.000016 + 0.000003 + 0.000014 @@ -403,11 +393,11 @@ available.
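
For reference, the ONNX figures above come from an :epkg:`onnxruntime`
session. A minimal sketch of that call pattern (hypothetical: ``onx`` is the
converted model, ``X32`` a float32 feature array, and the provider choice is
an assumption) is:

.. code-block:: default

    # Hypothetical sketch: score the converted model with onnxruntime.
    from onnxruntime import InferenceSession

    sess = InferenceSession(onx.SerializeToString(),
                            providers=["CPUExecutionProvider"])
    input_name = sess.get_inputs()[0].name
    onnx_pred = sess.run(None, {input_name: X32})[0]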

-.. GENERATED FROM PYTHON SOURCE LINES 128-129 +.. GENERATED FROM PYTHON SOURCE LINES 124-125 Graph. -.. GENERATED FROM PYTHON SOURCE LINES 129-134 +.. GENERATED FROM PYTHON SOURCE LINES 125-130 .. code-block:: default @@ -428,7 +418,7 @@ Graph. -.. GENERATED FROM PYTHON SOURCE LINES 135-141 +.. GENERATED FROM PYTHON SOURCE LINES 131-137 :epkg:`ONNX` runtimes are much faster than :epkg:`scikit-learn` to predict one observation. :epkg:`scikit-learn` is optimized @@ -440,35 +430,23 @@ parallelization and languages (:epkg:`C++`, :epkg:`openmp`). .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 31.317 seconds) + **Total running time of the script:** ( 0 minutes 55.146 seconds) .. _sphx_glr_download_auto_tutorial_plot_bbegin_measure_time.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_bbegin_measure_time.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_bbegin_measure_time.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_bbegin_measure_time.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_bbegin_measure_time.ipynb ` + :download:`Download Jupyter notebook: plot_bbegin_measure_time.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_catwoe_transformer.rst.txt b/_sources/auto_tutorial/plot_catwoe_transformer.rst.txt index 9714f2da2..9e8ad2b27 100644 --- a/_sources/auto_tutorial/plot_catwoe_transformer.rst.txt +++ b/_sources/auto_tutorial/plot_catwoe_transformer.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_catwoe_transformer.py" +.. "auto_tutorial/plot_catwoe_transformer.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -34,9 +34,6 @@ cases the original encoder can handle. .. index:: WOE, WOEEncoder -.. contents:: - :local: - A simple example ++++++++++++++++ @@ -44,7 +41,7 @@ Let's take the `Iris dataset `_. Every feature is converter into integer. -.. GENERATED FROM PYTHON SOURCE LINES 30-51 +.. GENERATED FROM PYTHON SOURCE LINES 27-48 .. code-block:: default @@ -75,8 +72,6 @@ Every feature is converter into integer. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none 0 1 @@ -89,14 +84,14 @@ Every feature is converter into integer. -.. GENERATED FROM PYTHON SOURCE LINES 52-56 +.. GENERATED FROM PYTHON SOURCE LINES 49-53 Let's look into the trained parameters of the model. It appears that WOEEncoder uses an OrdinalEncoder but not the one from scikit-learn. We need to add a converter for this model tool. -.. GENERATED FROM PYTHON SOURCE LINES 56-62 +.. GENERATED FROM PYTHON SOURCE LINES 53-59 .. code-block:: default @@ -112,8 +107,6 @@ converter for this model tool. .. 
rst-class:: sphx-glr-script-out - Out: - .. code-block:: none encoder OrdinalEncoder(cols=[0], @@ -143,7 +136,7 @@ converter for this model tool. -.. GENERATED FROM PYTHON SOURCE LINES 63-68 +.. GENERATED FROM PYTHON SOURCE LINES 60-65 Custom converter for OrdinalEncoder +++++++++++++++++++++++++++++++++++ @@ -151,7 +144,7 @@ Custom converter for OrdinalEncoder We start from example :ref:`l-plot-custom-converter` and then write the conversion. -.. GENERATED FROM PYTHON SOURCE LINES 68-121 +.. GENERATED FROM PYTHON SOURCE LINES 65-118 .. code-block:: default @@ -215,11 +208,11 @@ and then write the conversion. -.. GENERATED FROM PYTHON SOURCE LINES 122-123 +.. GENERATED FROM PYTHON SOURCE LINES 119-120 Let's compute the output one a short example. -.. GENERATED FROM PYTHON SOURCE LINES 123-130 +.. GENERATED FROM PYTHON SOURCE LINES 120-127 .. code-block:: default @@ -236,8 +229,6 @@ Let's compute the output one a short example. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none 0 1 @@ -250,11 +241,11 @@ Let's compute the output one a short example. -.. GENERATED FROM PYTHON SOURCE LINES 131-132 +.. GENERATED FROM PYTHON SOURCE LINES 128-129 Let's check the ONNX conversion produces the same results. -.. GENERATED FROM PYTHON SOURCE LINES 132-138 +.. GENERATED FROM PYTHON SOURCE LINES 129-135 .. code-block:: default @@ -270,8 +261,6 @@ Let's check the ONNX conversion produces the same results. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[1 1] @@ -283,7 +272,7 @@ Let's check the ONNX conversion produces the same results. -.. GENERATED FROM PYTHON SOURCE LINES 139-146 +.. GENERATED FROM PYTHON SOURCE LINES 136-143 That works. @@ -293,7 +282,7 @@ Custom converter for WOEEncoder We start from example :ref:`l-plot-custom-converter` and then write the conversion. -.. GENERATED FROM PYTHON SOURCE LINES 146-222 +.. GENERATED FROM PYTHON SOURCE LINES 143-219 .. code-block:: default @@ -380,11 +369,11 @@ and then write the conversion. -.. GENERATED FROM PYTHON SOURCE LINES 223-224 +.. GENERATED FROM PYTHON SOURCE LINES 220-221 Let's compute the output one a short example. -.. GENERATED FROM PYTHON SOURCE LINES 224-229 +.. GENERATED FROM PYTHON SOURCE LINES 221-226 .. code-block:: default @@ -399,8 +388,6 @@ Let's compute the output one a short example. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none 0 1 @@ -413,11 +400,11 @@ Let's compute the output one a short example. -.. GENERATED FROM PYTHON SOURCE LINES 230-231 +.. GENERATED FROM PYTHON SOURCE LINES 227-228 Let's check the ONNX conversion produces the same results. -.. GENERATED FROM PYTHON SOURCE LINES 231-236 +.. GENERATED FROM PYTHON SOURCE LINES 228-233 .. code-block:: default @@ -432,8 +419,6 @@ Let's check the ONNX conversion produces the same results. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[-1.4057125 -0.03594739] @@ -448,35 +433,23 @@ Let's check the ONNX conversion produces the same results. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.459 seconds) + **Total running time of the script:** ( 0 minutes 0.173 seconds) .. _sphx_glr_download_auto_tutorial_plot_catwoe_transformer.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. 
image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_catwoe_transformer.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_catwoe_transformer.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_catwoe_transformer.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_catwoe_transformer.ipynb ` + :download:`Download Jupyter notebook: plot_catwoe_transformer.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_cbegin_opset.rst.txt b/_sources/auto_tutorial/plot_cbegin_opset.rst.txt index a484b73ff..fb4a05010 100644 --- a/_sources/auto_tutorial/plot_cbegin_opset.rst.txt +++ b/_sources/auto_tutorial/plot_cbegin_opset.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_cbegin_opset.py" +.. "auto_tutorial/plot_cbegin_opset.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -45,15 +45,12 @@ This example considers an `IsolationForest `_ and digs into opsets. -.. contents:: - :local: - Data ++++ A simple example. -.. GENERATED FROM PYTHON SOURCE LINES 39-57 +.. GENERATED FROM PYTHON SOURCE LINES 36-54 .. code-block:: default @@ -87,12 +84,12 @@ A simple example. -.. GENERATED FROM PYTHON SOURCE LINES 58-60 +.. GENERATED FROM PYTHON SOURCE LINES 55-57 ONNX ++++ -.. GENERATED FROM PYTHON SOURCE LINES 60-66 +.. GENERATED FROM PYTHON SOURCE LINES 57-63 .. code-block:: default @@ -108,13 +105,11 @@ ONNX .. rst-class:: sphx-glr-script-out - Out: - .. 
code-block:: none ir_version: 8 producer_name: "skl2onnx" - producer_version: "1.11.2" + producer_version: "1.14.0" domain: "ai.onnx" model_version: 0 doc_string: "" @@ -144,99 +139,87 @@ ONNX } attribute { name: "nodes_falsenodeids" - ints: 54 - ints: 25 - ints: 6 - ints: 5 - ints: 0 - ints: 0 - ints: 16 - ints: 15 - ints: 12 + ints: 52 ints: 11 + ints: 4 ints: 0 + ints: 10 + ints: 7 ints: 0 - ints: 14 + ints: 9 ints: 0 ints: 0 ints: 0 - ints: 18 + ints: 35 + ints: 20 + ints: 15 ints: 0 - ints: 22 - ints: 21 + ints: 17 + ints: 0 + ints: 19 ints: 0 ints: 0 + ints: 28 + ints: 25 ints: 24 ints: 0 ints: 0 - ints: 39 - ints: 30 - ints: 29 + ints: 27 ints: 0 ints: 0 ints: 32 - ints: 0 - ints: 36 - ints: 35 + ints: 31 ints: 0 ints: 0 - ints: 38 + ints: 34 ints: 0 ints: 0 - ints: 47 - ints: 46 ints: 45 ints: 44 + ints: 41 + ints: 40 ints: 0 ints: 0 + ints: 43 + ints: 0 ints: 0 ints: 0 + ints: 51 + ints: 50 ints: 49 ints: 0 - ints: 53 - ints: 52 ints: 0 ints: 0 ints: 0 ints: 56 - ints: 0 - ints: 72 - ints: 67 - ints: 62 - ints: 61 - ints: 0 - ints: 0 - ints: 66 - ints: 65 - ints: 0 - ints: 0 - ints: 0 - ints: 71 - ints: 70 + ints: 55 ints: 0 ints: 0 + ints: 58 ints: 0 ints: 0 type: INTS } attribute { name: "nodes_featureids" - ints: 0 ints: 1 ints: 1 ints: 0 ints: 0 ints: 0 - ints: 1 ints: 0 ints: 0 + ints: 1 ints: 0 ints: 0 ints: 0 + ints: 1 ints: 0 + ints: 1 ints: 0 ints: 0 ints: 0 + ints: 1 ints: 0 ints: 0 ints: 0 @@ -247,9 +230,6 @@ ONNX ints: 0 ints: 0 ints: 0 - ints: 1 - ints: 0 - ints: 0 ints: 0 ints: 0 ints: 0 @@ -258,37 +238,24 @@ ONNX ints: 0 ints: 0 ints: 1 - ints: 0 - ints: 0 ints: 1 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 ints: 1 - ints: 0 - ints: 0 + ints: 1 ints: 0 ints: 0 ints: 1 ints: 0 ints: 0 ints: 0 - ints: 0 + ints: 1 ints: 1 ints: 0 ints: 0 - ints: 1 ints: 0 ints: 0 ints: 0 ints: 0 - ints: 1 + ints: 0 ints: 0 ints: 0 ints: 0 @@ -357,20 +324,6 @@ ONNX floats: 1.0 floats: 1.0 floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 type: FLOATS } attribute { @@ -434,20 +387,6 @@ ONNX ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -455,24 +394,19 @@ ONNX strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" - strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "LEAF" + strings: "BRANCH_LEQ" strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" @@ -484,6 +418,7 @@ ONNX strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" + strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "LEAF" @@ -497,32 +432,22 @@ ONNX strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" - strings: "LEAF" - strings: "LEAF" - strings: "BRANCH_LEQ" - strings: "LEAF" strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "LEAF" strings: "LEAF" strings: "LEAF" - strings: "BRANCH_LEQ" strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" - strings: "BRANCH_LEQ" - strings: 
"BRANCH_LEQ" - strings: "LEAF" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" + strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" type: STRINGS @@ -588,20 +513,6 @@ ONNX ints: 56 ints: 57 ints: 58 - ints: 59 - ints: 60 - ints: 61 - ints: 62 - ints: 63 - ints: 64 - ints: 65 - ints: 66 - ints: 67 - ints: 68 - ints: 69 - ints: 70 - ints: 71 - ints: 72 type: INTS } attribute { @@ -665,20 +576,6 @@ ONNX ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -686,151 +583,123 @@ ONNX ints: 1 ints: 2 ints: 3 - ints: 4 ints: 0 + ints: 5 + ints: 6 ints: 0 - ints: 7 ints: 8 - ints: 9 - ints: 10 ints: 0 ints: 0 - ints: 13 - ints: 0 ints: 0 + ints: 12 + ints: 13 + ints: 14 ints: 0 - ints: 17 + ints: 16 ints: 0 - ints: 19 - ints: 20 + ints: 18 ints: 0 ints: 0 + ints: 21 + ints: 22 ints: 23 ints: 0 ints: 0 ints: 26 - ints: 27 - ints: 28 ints: 0 ints: 0 - ints: 31 + ints: 29 + ints: 30 + ints: 0 ints: 0 ints: 33 - ints: 34 ints: 0 ints: 0 + ints: 36 ints: 37 + ints: 38 + ints: 39 ints: 0 ints: 0 - ints: 40 - ints: 41 ints: 42 - ints: 43 - ints: 0 ints: 0 ints: 0 ints: 0 + ints: 46 + ints: 47 ints: 48 ints: 0 - ints: 50 - ints: 51 - ints: 0 - ints: 0 - ints: 0 - ints: 55 - ints: 0 - ints: 57 - ints: 58 - ints: 59 - ints: 60 - ints: 0 - ints: 0 - ints: 63 - ints: 64 ints: 0 ints: 0 ints: 0 - ints: 68 - ints: 69 + ints: 53 + ints: 54 ints: 0 ints: 0 + ints: 57 ints: 0 ints: 0 type: INTS } attribute { name: "nodes_values" - floats: 10.021278381347656 - floats: 3.7887494564056396 - floats: -7.121064186096191 - floats: 4.416437149047852 - floats: 0.0 - floats: 0.0 - floats: -1.7588245868682861 - floats: 7.825988292694092 - floats: 4.649615287780762 - floats: 3.2951412200927734 + floats: 9.529556274414062 + floats: 0.06639961153268814 + floats: -1.2243356704711914 floats: 0.0 + floats: 0.1448548436164856 + floats: -1.0675677061080933 floats: 0.0 - floats: 6.6710991859436035 + floats: -0.4722660183906555 floats: 0.0 floats: 0.0 floats: 0.0 - floats: 4.765402317047119 + floats: 6.237240314483643 + floats: -1.5116932392120361 + floats: 0.8247517943382263 floats: 0.0 - floats: 6.541184425354004 - floats: -1.2423985004425049 + floats: -5.144781589508057 floats: 0.0 + floats: 5.4886579513549805 floats: 0.0 - floats: 7.697707176208496 floats: 0.0 + floats: -0.7253645658493042 + floats: 1.3656964302062988 + floats: -0.8799896836280823 floats: 0.0 - floats: 9.153670310974121 - floats: 6.013628005981445 - floats: 8.105496406555176 floats: 0.0 + floats: -1.0190058946609497 floats: 0.0 - floats: 7.426864147186279 floats: 0.0 - floats: 8.715617179870605 - floats: 8.431848526000977 + floats: -0.49304017424583435 + floats: -0.5850936770439148 floats: 0.0 floats: 0.0 - floats: 7.730372905731201 + floats: 1.2917989492416382 floats: 0.0 floats: 0.0 - floats: 6.437074661254883 - floats: 9.96568489074707 - floats: 9.521824836730957 - floats: 9.370474815368652 + floats: 7.957119941711426 + floats: 7.862874507904053 + floats: 7.656042575836182 + floats: 7.188352584838867 floats: 0.0 floats: 0.0 + floats: 7.790811538696289 floats: 0.0 floats: 0.0 - floats: 9.491046905517578 floats: 0.0 - floats: 8.651312828063965 - floats: 9.867877006530762 + floats: 9.259439468383789 + floats: 8.948318481445312 + floats: -4.675846576690674 floats: 0.0 floats: 0.0 floats: 0.0 - floats: 5.1377949714660645 floats: 0.0 - floats: 11.692091941833496 - 
floats: 10.630556106567383 - floats: 10.18297004699707 - floats: 8.508511543273926 - floats: 0.0 - floats: 0.0 - floats: 7.681608200073242 - floats: 10.399255752563477 - floats: 0.0 - floats: 0.0 - floats: 0.0 - floats: 8.249090194702148 - floats: 11.212265014648438 + floats: -3.819464921951294 + floats: -4.20181941986084 floats: 0.0 floats: 0.0 + floats: -3.639735460281372 floats: 0.0 floats: 0.0 type: FLOATS @@ -872,54 +741,40 @@ ONNX ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { name: "target_nodeids" - ints: 4 - ints: 5 + ints: 3 + ints: 6 + ints: 8 + ints: 9 ints: 10 - ints: 11 - ints: 13 ints: 14 - ints: 15 - ints: 17 - ints: 20 - ints: 21 + ints: 16 + ints: 18 + ints: 19 ints: 23 ints: 24 - ints: 28 - ints: 29 + ints: 26 + ints: 27 + ints: 30 ints: 31 + ints: 33 ints: 34 - ints: 35 - ints: 37 - ints: 38 + ints: 39 + ints: 40 + ints: 42 ints: 43 ints: 44 - ints: 45 - ints: 46 ints: 48 + ints: 49 + ints: 50 ints: 51 - ints: 52 - ints: 53 + ints: 54 ints: 55 - ints: 60 - ints: 61 - ints: 64 - ints: 65 - ints: 66 - ints: 69 - ints: 70 - ints: 71 - ints: 72 + ints: 57 + ints: 58 type: INTS } attribute { @@ -954,54 +809,40 @@ ONNX ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { name: "target_weights" - floats: 4.0 - floats: 5.0 + floats: 3.0 + floats: 6.0 + floats: 8.0 + floats: 9.0 floats: 10.0 - floats: 11.0 - floats: 13.0 floats: 14.0 - floats: 15.0 - floats: 17.0 - floats: 20.0 - floats: 21.0 + floats: 16.0 + floats: 18.0 + floats: 19.0 floats: 23.0 floats: 24.0 - floats: 28.0 - floats: 29.0 + floats: 26.0 + floats: 27.0 + floats: 30.0 floats: 31.0 + floats: 33.0 floats: 34.0 - floats: 35.0 - floats: 37.0 - floats: 38.0 + floats: 39.0 + floats: 40.0 + floats: 42.0 floats: 43.0 floats: 44.0 - floats: 45.0 - floats: 46.0 floats: 48.0 + floats: 49.0 + floats: 50.0 floats: 51.0 - floats: 52.0 - floats: 53.0 + floats: 54.0 floats: 55.0 - floats: 60.0 - floats: 61.0 - floats: 64.0 - floats: 65.0 - floats: 66.0 - floats: 69.0 - floats: 70.0 - floats: 71.0 - floats: 72.0 + floats: 57.0 + floats: 58.0 type: FLOATS } domain: "ai.onnx.ml" @@ -1009,8 +850,8 @@ ONNX node { input: "X" input: "node_sample0_Gathercst" - output: "node_sample1_output0" - name: "node_sample1_Gather" + output: "node_sample2_output0" + name: "node_sample2_Gather" op_type: "Gather" attribute { name: "axis" @@ -1022,8 +863,8 @@ ONNX node { input: "X" input: "node_sample0_Gathercst" - output: "node_sample2_output0" - name: "node_sample2_Gather" + output: "node_sample1_output0" + name: "node_sample1_Gather" op_type: "Gather" attribute { name: "axis" @@ -1033,33 +874,9 @@ ONNX domain: "" } node { - input: "node_sample0_Y0" - output: "node_sample0_output02" - name: "node_sample0_Cast" - op_type: "Cast" - attribute { - name: "to" - i: 7 - type: INT - } - domain: "" - } - node { - input: "node_sample0_Y0" - output: "path_length0_output0" - name: "path_length0_Cast" - op_type: "Cast" - attribute { - name: "to" - i: 7 - type: INT - } - domain: "" - } - node { - input: "node_sample1_output0" - output: "node_sample1_Y0" - name: "node_sample1_TreeEnsembleRegressor" + input: "node_sample2_output0" + output: "node_sample2_Y0" + name: "node_sample2_TreeEnsembleRegressor" op_type: "TreeEnsembleRegressor" attribute { name: "n_targets" @@ -1068,66 +885,66 @@ ONNX } attribute { name: "nodes_falsenodeids" - ints: 30 - ints: 29 - ints: 6 - ints: 5 + ints: 72 + ints: 45 + ints: 22 + ints: 17 + ints: 12 
+ ints: 9 + ints: 8 ints: 0 ints: 0 - ints: 16 - ints: 9 + ints: 11 ints: 0 - ints: 13 - ints: 12 ints: 0 + ints: 14 ints: 0 - ints: 15 + ints: 16 ints: 0 ints: 0 - ints: 24 - ints: 21 - ints: 20 + ints: 19 ints: 0 + ints: 21 ints: 0 - ints: 23 ints: 0 + ints: 30 + ints: 25 ints: 0 + ints: 29 ints: 28 - ints: 27 - ints: 0 ints: 0 ints: 0 ints: 0 - ints: 48 - ints: 33 + ints: 38 + ints: 35 + ints: 34 ints: 0 - ints: 43 - ints: 36 ints: 0 - ints: 40 - ints: 39 + ints: 37 ints: 0 ints: 0 ints: 42 + ints: 41 ints: 0 ints: 0 - ints: 45 - ints: 0 - ints: 47 + ints: 44 ints: 0 ints: 0 - ints: 66 - ints: 63 + ints: 69 ints: 58 - ints: 55 - ints: 54 + ints: 53 + ints: 52 + ints: 51 ints: 0 ints: 0 - ints: 57 ints: 0 + ints: 55 + ints: 0 + ints: 57 ints: 0 - ints: 60 ints: 0 + ints: 66 + ints: 63 ints: 62 ints: 0 ints: 0 @@ -1136,10 +953,40 @@ ONNX ints: 0 ints: 68 ints: 0 - ints: 72 + ints: 0 ints: 71 ints: 0 ints: 0 + ints: 74 + ints: 0 + ints: 96 + ints: 83 + ints: 78 + ints: 0 + ints: 82 + ints: 81 + ints: 0 + ints: 0 + ints: 0 + ints: 91 + ints: 88 + ints: 87 + ints: 0 + ints: 0 + ints: 90 + ints: 0 + ints: 0 + ints: 95 + ints: 94 + ints: 0 + ints: 0 + ints: 0 + ints: 102 + ints: 99 + ints: 0 + ints: 101 + ints: 0 + ints: 0 ints: 0 type: INTS } @@ -1148,40 +995,50 @@ ONNX ints: 1 ints: 1 ints: 1 + ints: 1 + ints: 1 + ints: 1 + ints: 1 + ints: 0 + ints: 0 ints: 0 ints: 0 ints: 0 ints: 1 + ints: 0 ints: 1 ints: 0 ints: 0 ints: 1 ints: 0 + ints: 1 + ints: 0 ints: 0 ints: 1 ints: 0 ints: 0 + ints: 0 ints: 1 ints: 0 ints: 0 ints: 0 ints: 0 + ints: 1 ints: 0 ints: 0 ints: 0 ints: 1 ints: 0 ints: 0 + ints: 1 + ints: 1 ints: 0 ints: 0 ints: 0 ints: 0 ints: 0 ints: 0 - ints: 1 - ints: 1 ints: 0 - ints: 1 ints: 0 ints: 0 ints: 0 @@ -1196,14 +1053,22 @@ ONNX ints: 0 ints: 1 ints: 0 + ints: 0 + ints: 0 ints: 1 + ints: 0 + ints: 0 ints: 1 ints: 0 ints: 0 + ints: 1 + ints: 0 ints: 0 ints: 0 ints: 0 ints: 1 + ints: 1 + ints: 1 ints: 0 ints: 1 ints: 0 @@ -1211,13 +1076,25 @@ ONNX ints: 0 ints: 0 ints: 0 - ints: 1 ints: 0 ints: 0 ints: 0 ints: 0 ints: 0 ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 1 + ints: 1 + ints: 0 + ints: 1 + ints: 0 + ints: 0 + ints: 0 type: INTS } attribute { @@ -1295,24 +1172,84 @@ ONNX floats: 1.0 floats: 1.0 floats: 1.0 - type: FLOATS - } - attribute { - name: "nodes_missing_value_tracks_true" - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + floats: 1.0 + type: FLOATS + } + attribute { + name: "nodes_missing_value_tracks_true" + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 ints: 0 ints: 0 ints: 0 @@ -1380,16 +1317,30 @@ ONNX strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: 
"BRANCH_LEQ" + strings: "BRANCH_LEQ" + strings: "BRANCH_LEQ" + strings: "BRANCH_LEQ" + strings: "LEAF" + strings: "LEAF" + strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" + strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" + strings: "LEAF" strings: "BRANCH_LEQ" + strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" + strings: "BRANCH_LEQ" + strings: "LEAF" + strings: "BRANCH_LEQ" + strings: "BRANCH_LEQ" + strings: "LEAF" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" @@ -1404,49 +1355,65 @@ ONNX strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" + strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" - strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" + strings: "BRANCH_LEQ" + strings: "LEAF" + strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" + strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" + strings: "BRANCH_LEQ" + strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" + strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" + strings: "LEAF" + strings: "LEAF" strings: "BRANCH_LEQ" + strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "LEAF" - strings: "LEAF" + strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" - strings: "BRANCH_LEQ" strings: "LEAF" strings: "BRANCH_LEQ" + strings: "BRANCH_LEQ" + strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" + strings: "BRANCH_LEQ" + strings: "LEAF" + strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "LEAF" + strings: "BRANCH_LEQ" + strings: "LEAF" strings: "LEAF" strings: "LEAF" type: STRINGS @@ -1526,6 +1493,36 @@ ONNX ints: 70 ints: 71 ints: 72 + ints: 73 + ints: 74 + ints: 75 + ints: 76 + ints: 77 + ints: 78 + ints: 79 + ints: 80 + ints: 81 + ints: 82 + ints: 83 + ints: 84 + ints: 85 + ints: 86 + ints: 87 + ints: 88 + ints: 89 + ints: 90 + ints: 91 + ints: 92 + ints: 93 + ints: 94 + ints: 95 + ints: 96 + ints: 97 + ints: 98 + ints: 99 + ints: 100 + ints: 101 + ints: 102 type: INTS } attribute { @@ -1603,6 +1600,36 @@ ONNX ints: 0 ints: 0 ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 type: INTS } attribute { @@ -1611,62 +1638,62 @@ ONNX ints: 2 ints: 3 ints: 4 - ints: 0 - ints: 0 + ints: 5 + ints: 6 ints: 7 - ints: 8 + ints: 0 ints: 0 ints: 10 - ints: 11 ints: 0 ints: 0 - ints: 14 + ints: 13 + ints: 0 + ints: 15 ints: 0 ints: 0 - ints: 17 ints: 18 - ints: 19 ints: 0 + ints: 20 ints: 0 - ints: 22 ints: 0 + ints: 23 + ints: 24 ints: 0 - ints: 25 ints: 26 - ints: 0 + ints: 27 ints: 0 ints: 0 ints: 0 ints: 31 ints: 32 + ints: 33 ints: 0 - ints: 34 - ints: 35 - ints: 0 - ints: 37 - ints: 38 ints: 0 + ints: 36 ints: 0 - ints: 41 ints: 0 + ints: 39 + ints: 40 ints: 0 - ints: 44 ints: 0 - ints: 46 + ints: 43 ints: 0 ints: 0 + ints: 46 + ints: 47 + ints: 48 ints: 49 ints: 50 - ints: 51 - ints: 52 - ints: 53 ints: 0 ints: 0 + ints: 0 + ints: 54 + ints: 0 ints: 56 ints: 0 ints: 0 ints: 59 - ints: 0 + ints: 60 ints: 61 ints: 0 ints: 0 @@ -1675,94 +1702,154 @@ ONNX ints: 0 
ints: 67 ints: 0 - ints: 69 + ints: 0 ints: 70 ints: 0 ints: 0 + ints: 73 + ints: 0 + ints: 75 + ints: 76 + ints: 77 + ints: 0 + ints: 79 + ints: 80 + ints: 0 + ints: 0 + ints: 0 + ints: 84 + ints: 85 + ints: 86 + ints: 0 + ints: 0 + ints: 89 + ints: 0 + ints: 0 + ints: 92 + ints: 93 + ints: 0 + ints: 0 + ints: 0 + ints: 97 + ints: 98 + ints: 0 + ints: 100 + ints: 0 + ints: 0 ints: 0 type: INTS } attribute { name: "nodes_values" - floats: 3.8512446880340576 - floats: -0.5676265358924866 - floats: -6.980898380279541 - floats: 4.714104175567627 + floats: 5.84586238861084 + floats: 3.3479835987091064 + floats: 1.3570774793624878 + floats: 1.2792494297027588 + floats: 0.8155440092086792 + floats: -0.14481677114963531 + floats: -0.6906545162200928 floats: 0.0 floats: 0.0 - floats: -5.093116760253906 - floats: -6.655633926391602 + floats: -0.7135818004608154 floats: 0.0 - floats: 2.331597328186035 - floats: -5.791065692901611 floats: 0.0 + floats: 0.9292002320289612 floats: 0.0 - floats: -5.753363609313965 + floats: 1.0187170505523682 floats: 0.0 floats: 0.0 - floats: -3.8087997436523438 - floats: 5.919134140014648 - floats: 2.3382115364074707 + floats: 1.3404710292816162 floats: 0.0 + floats: 1.343726634979248 floats: 0.0 - floats: 6.681684494018555 floats: 0.0 + floats: 1.8512831926345825 + floats: -0.5900801420211792 floats: 0.0 - floats: -1.0638467073440552 - floats: 5.241893768310547 + floats: -0.019533302634954453 + floats: 1.626607060432434 floats: 0.0 floats: 0.0 floats: 0.0 + floats: -0.19708473980426788 + floats: 2.966790199279785 + floats: -1.8903709650039673 floats: 0.0 - floats: 9.605061531066895 - floats: 7.560668468475342 floats: 0.0 - floats: 8.219744682312012 - floats: 5.37798547744751 + floats: 3.3281314373016357 floats: 0.0 - floats: 7.676764011383057 - floats: 8.128286361694336 floats: 0.0 + floats: 2.2592618465423584 + floats: 2.113555669784546 floats: 0.0 - floats: 9.215782165527344 floats: 0.0 + floats: 0.04144952446222305 floats: 0.0 - floats: 8.422836303710938 floats: 0.0 - floats: 8.757240295410156 + floats: -0.7720679640769958 + floats: -2.6698925495147705 + floats: -3.359083414077759 + floats: -3.819303274154663 + floats: -3.8522212505340576 floats: 0.0 floats: 0.0 - floats: 10.749845504760742 - floats: 8.670648574829102 - floats: 10.19068717956543 - floats: 7.231447219848633 - floats: 5.572278022766113 floats: 0.0 + floats: 3.8754918575286865 floats: 0.0 - floats: 9.892742156982422 + floats: -2.76548433303833 floats: 0.0 floats: 0.0 - floats: 5.543532371520996 + floats: -1.5094605684280396 + floats: 4.876166343688965 + floats: -2.066214084625244 floats: 0.0 - floats: 8.146027565002441 floats: 0.0 + floats: 5.31315803527832 floats: 0.0 - floats: 10.089201927185059 floats: 0.0 + floats: 5.141677379608154 floats: 0.0 - floats: 7.585289001464844 floats: 0.0 - floats: 11.574915885925293 - floats: 11.187359809875488 + floats: 4.081350326538086 floats: 0.0 floats: 0.0 + floats: -6.860238075256348 floats: 0.0 - type: FLOATS - } - attribute { - name: "post_transform" - s: "NONE" - type: STRING + floats: 9.618461608886719 + floats: 7.555417537689209 + floats: 6.563416481018066 + floats: 0.0 + floats: 7.313610076904297 + floats: -5.703146934509277 + floats: 0.0 + floats: 0.0 + floats: 0.0 + floats: -4.049132823944092 + floats: -4.672673225402832 + floats: -5.863733291625977 + floats: 0.0 + floats: 0.0 + floats: -4.3013410568237305 + floats: 0.0 + floats: 0.0 + floats: -2.4979329109191895 + floats: -3.212557315826416 + floats: 0.0 + floats: 0.0 + floats: 0.0 + floats: 
9.996405601501465 + floats: 9.734021186828613 + floats: 0.0 + floats: 9.765551567077637 + floats: 0.0 + floats: 0.0 + floats: 0.0 + type: FLOATS + } + attribute { + name: "post_transform" + s: "NONE" + type: STRING } attribute { name: "target_ids" @@ -1803,47 +1890,77 @@ ONNX ints: 0 ints: 0 ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 type: INTS } attribute { name: "target_nodeids" - ints: 4 - ints: 5 + ints: 7 ints: 8 + ints: 10 ints: 11 - ints: 12 - ints: 14 + ints: 13 ints: 15 - ints: 19 + ints: 16 + ints: 18 ints: 20 - ints: 22 - ints: 23 - ints: 26 + ints: 21 + ints: 24 ints: 27 ints: 28 ints: 29 - ints: 32 - ints: 35 - ints: 38 - ints: 39 + ints: 33 + ints: 34 + ints: 36 + ints: 37 + ints: 40 ints: 41 - ints: 42 + ints: 43 ints: 44 - ints: 46 - ints: 47 - ints: 53 + ints: 50 + ints: 51 + ints: 52 ints: 54 ints: 56 ints: 57 - ints: 59 ints: 61 ints: 62 ints: 64 ints: 65 ints: 67 + ints: 68 ints: 70 ints: 71 - ints: 72 + ints: 73 + ints: 77 + ints: 80 + ints: 81 + ints: 82 + ints: 86 + ints: 87 + ints: 89 + ints: 90 + ints: 93 + ints: 94 + ints: 95 + ints: 98 + ints: 100 + ints: 101 + ints: 102 type: INTS } attribute { @@ -1885,55 +2002,97 @@ ONNX ints: 0 ints: 0 ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 + ints: 0 type: INTS } attribute { name: "target_weights" - floats: 4.0 - floats: 5.0 + floats: 7.0 floats: 8.0 + floats: 10.0 floats: 11.0 - floats: 12.0 - floats: 14.0 + floats: 13.0 floats: 15.0 - floats: 19.0 + floats: 16.0 + floats: 18.0 floats: 20.0 - floats: 22.0 - floats: 23.0 - floats: 26.0 + floats: 21.0 + floats: 24.0 floats: 27.0 floats: 28.0 floats: 29.0 - floats: 32.0 - floats: 35.0 - floats: 38.0 - floats: 39.0 + floats: 33.0 + floats: 34.0 + floats: 36.0 + floats: 37.0 + floats: 40.0 floats: 41.0 - floats: 42.0 + floats: 43.0 floats: 44.0 - floats: 46.0 - floats: 47.0 - floats: 53.0 + floats: 50.0 + floats: 51.0 + floats: 52.0 floats: 54.0 floats: 56.0 floats: 57.0 - floats: 59.0 floats: 61.0 floats: 62.0 floats: 64.0 floats: 65.0 floats: 67.0 + floats: 68.0 floats: 70.0 floats: 71.0 - floats: 72.0 + floats: 73.0 + floats: 77.0 + floats: 80.0 + floats: 81.0 + floats: 82.0 + floats: 86.0 + floats: 87.0 + floats: 89.0 + floats: 90.0 + floats: 93.0 + floats: 94.0 + floats: 95.0 + floats: 98.0 + floats: 100.0 + floats: 101.0 + floats: 102.0 type: FLOATS } domain: "ai.onnx.ml" } node { - input: "node_sample2_output0" - output: "node_sample2_Y0" - name: "node_sample2_TreeEnsembleRegressor" + input: "node_sample0_Y0" + output: "node_sample0_output02" + name: "node_sample0_Cast" + op_type: "Cast" + attribute { + name: "to" + i: 7 + type: INT + } + domain: "" + } + node { + input: "node_sample1_output0" + output: "node_sample1_Y0" + name: "node_sample1_TreeEnsembleRegressor" op_type: "TreeEnsembleRegressor" attribute { name: "n_targets" @@ -1943,120 +2102,109 @@ ONNX attribute { name: "nodes_falsenodeids" ints: 56 - ints: 41 - ints: 22 - ints: 19 - ints: 12 - ints: 9 - ints: 8 + ints: 33 + ints: 14 + ints: 5 ints: 0 + ints: 13 + ints: 10 + ints: 9 ints: 0 - ints: 11 ints: 0 + ints: 12 ints: 0 - ints: 16 - ints: 15 ints: 0 ints: 0 - ints: 18 + ints: 20 + ints: 17 ints: 0 + ints: 19 ints: 0 - ints: 21 ints: 0 + ints: 26 + ints: 23 ints: 0 - ints: 38 - ints: 31 - ints: 28 - ints: 27 + ints: 25 ints: 0 ints: 0 ints: 30 + ints: 29 ints: 0 ints: 0 - ints: 35 - ints: 34 
- ints: 0 + ints: 32 ints: 0 - ints: 37 ints: 0 + ints: 35 ints: 0 + ints: 43 + ints: 42 + ints: 41 ints: 40 ints: 0 ints: 0 - ints: 55 - ints: 52 - ints: 49 + ints: 0 + ints: 0 + ints: 51 ints: 48 ints: 47 ints: 0 ints: 0 - ints: 0 - ints: 51 + ints: 50 ints: 0 ints: 0 + ints: 55 ints: 54 ints: 0 ints: 0 ints: 0 - ints: 68 - ints: 67 - ints: 60 - ints: 0 - ints: 66 + ints: 70 ints: 65 - ints: 64 + ints: 62 + ints: 61 ints: 0 ints: 0 + ints: 64 ints: 0 ints: 0 + ints: 67 ints: 0 - ints: 84 - ints: 83 - ints: 78 - ints: 75 - ints: 74 + ints: 69 ints: 0 ints: 0 - ints: 77 + ints: 72 ints: 0 ints: 0 - ints: 82 - ints: 81 + type: INTS + } + attribute { + name: "nodes_featureids" ints: 0 + ints: 1 ints: 0 ints: 0 ints: 0 - ints: 92 - ints: 91 - ints: 88 + ints: 1 ints: 0 - ints: 90 + ints: 1 ints: 0 ints: 0 + ints: 1 ints: 0 - ints: 98 - ints: 97 - ints: 96 ints: 0 ints: 0 + ints: 1 + ints: 0 ints: 0 - ints: 102 - ints: 101 ints: 0 ints: 0 - ints: 104 ints: 0 ints: 0 - type: INTS - } - attribute { - name: "nodes_featureids" - ints: 1 ints: 0 ints: 0 ints: 0 ints: 0 ints: 0 + ints: 1 ints: 0 ints: 0 ints: 0 @@ -2066,6 +2214,7 @@ ONNX ints: 0 ints: 0 ints: 0 + ints: 1 ints: 0 ints: 1 ints: 0 @@ -2074,30 +2223,6 @@ ONNX ints: 0 ints: 0 ints: 1 - ints: 1 - ints: 1 - ints: 1 - ints: 0 - ints: 0 - ints: 1 - ints: 0 - ints: 0 - ints: 1 - ints: 1 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 1 - ints: 0 - ints: 0 - ints: 1 - ints: 1 - ints: 0 ints: 0 ints: 0 ints: 0 @@ -2105,53 +2230,23 @@ ONNX ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 1 - ints: 1 - ints: 0 - ints: 0 - ints: 0 - ints: 1 ints: 1 ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 1 ints: 1 ints: 1 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 1 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 1 ints: 1 - ints: 1 - ints: 0 ints: 0 ints: 0 ints: 0 ints: 0 - ints: 1 ints: 0 ints: 1 ints: 0 ints: 0 ints: 0 - ints: 1 - ints: 0 - ints: 0 ints: 0 ints: 1 ints: 0 @@ -2233,38 +2328,6 @@ ONNX floats: 1.0 floats: 1.0 floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 type: FLOATS } attribute { @@ -2342,38 +2405,6 @@ ONNX ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -2382,14 +2413,8 @@ ONNX strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" - strings: "LEAF" - strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "LEAF" @@ -2397,14 +2422,9 @@ ONNX strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" - strings: "BRANCH_LEQ" - strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "LEAF" strings: "LEAF" 
strings: "BRANCH_LEQ" strings: "LEAF" @@ -2412,19 +2432,11 @@ ONNX strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "LEAF" - strings: "LEAF" - strings: "BRANCH_LEQ" - strings: "LEAF" - strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" - strings: "LEAF" strings: "LEAF" strings: "LEAF" strings: "BRANCH_LEQ" @@ -2432,22 +2444,14 @@ ONNX strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" - strings: "LEAF" - strings: "LEAF" - strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" - strings: "LEAF" - strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" strings: "LEAF" strings: "LEAF" - strings: "LEAF" - strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" @@ -2461,22 +2465,17 @@ ONNX strings: "LEAF" strings: "LEAF" strings: "LEAF" - strings: "LEAF" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" - strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" - strings: "LEAF" - strings: "BRANCH_LEQ" - strings: "BRANCH_LEQ" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" - strings: "LEAF" strings: "BRANCH_LEQ" + strings: "LEAF" strings: "BRANCH_LEQ" strings: "LEAF" strings: "LEAF" @@ -2560,38 +2559,6 @@ ONNX ints: 70 ints: 71 ints: 72 - ints: 73 - ints: 74 - ints: 75 - ints: 76 - ints: 77 - ints: 78 - ints: 79 - ints: 80 - ints: 81 - ints: 82 - ints: 83 - ints: 84 - ints: 85 - ints: 86 - ints: 87 - ints: 88 - ints: 89 - ints: 90 - ints: 91 - ints: 92 - ints: 93 - ints: 94 - ints: 95 - ints: 96 - ints: 97 - ints: 98 - ints: 99 - ints: 100 - ints: 101 - ints: 102 - ints: 103 - ints: 104 type: INTS } attribute { @@ -2669,38 +2636,6 @@ ONNX ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { @@ -2709,54 +2644,54 @@ ONNX ints: 2 ints: 3 ints: 4 - ints: 5 + ints: 0 ints: 6 ints: 7 + ints: 8 ints: 0 ints: 0 - ints: 10 - ints: 0 + ints: 11 ints: 0 - ints: 13 - ints: 14 ints: 0 ints: 0 - ints: 17 + ints: 15 + ints: 16 ints: 0 + ints: 18 ints: 0 - ints: 20 ints: 0 + ints: 21 + ints: 22 ints: 0 - ints: 23 ints: 24 - ints: 25 - ints: 26 ints: 0 ints: 0 - ints: 29 + ints: 27 + ints: 28 ints: 0 ints: 0 - ints: 32 - ints: 33 + ints: 31 ints: 0 ints: 0 + ints: 34 + ints: 0 ints: 36 + ints: 37 + ints: 38 + ints: 39 ints: 0 ints: 0 - ints: 39 ints: 0 ints: 0 - ints: 42 - ints: 43 ints: 44 ints: 45 ints: 46 ints: 0 ints: 0 - ints: 0 - ints: 50 + ints: 49 ints: 0 ints: 0 + ints: 52 ints: 53 ints: 0 ints: 0 @@ -2764,159 +2699,95 @@ ONNX ints: 57 ints: 58 ints: 59 + ints: 60 + ints: 0 ints: 0 - ints: 61 - ints: 62 ints: 63 ints: 0 ints: 0 + ints: 66 ints: 0 + ints: 68 ints: 0 ints: 0 - ints: 69 - ints: 70 ints: 71 - ints: 72 - ints: 73 - ints: 0 - ints: 0 - ints: 76 - ints: 0 - ints: 0 - ints: 79 - ints: 80 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 85 - ints: 86 - ints: 87 - ints: 0 - ints: 89 - ints: 0 - ints: 0 - ints: 0 - ints: 93 - ints: 94 - ints: 95 - ints: 0 - ints: 0 - ints: 0 - ints: 99 - ints: 100 - ints: 0 - ints: 0 - ints: 103 ints: 0 ints: 0 type: INTS } attribute { name: "nodes_values" - floats: 
-1.9907585382461548 - floats: 6.839632511138916 - floats: 3.6026360988616943 - floats: 3.51365327835083 - floats: 2.635514974594116 - floats: 2.1891138553619385 - floats: 2.092236280441284 - floats: 0.0 - floats: 0.0 - floats: 2.345102310180664 - floats: 0.0 - floats: 0.0 - floats: 3.0827603340148926 - floats: 2.872518539428711 - floats: 0.0 - floats: 0.0 - floats: -4.910643100738525 - floats: 0.0 - floats: 0.0 - floats: 3.52667498588562 - floats: 0.0 - floats: 0.0 - floats: -2.4622914791107178 - floats: -6.068970680236816 - floats: -6.634273052215576 - floats: -6.811461448669434 - floats: 0.0 - floats: 0.0 - floats: -6.576728343963623 - floats: 0.0 - floats: 0.0 - floats: -3.355783224105835 - floats: -4.942788600921631 - floats: 0.0 + floats: 0.2054523527622223 + floats: 5.720078945159912 + floats: -2.2732791900634766 + floats: -4.630293369293213 floats: 0.0 - floats: 6.617758750915527 + floats: 5.411093711853027 + floats: -3.3866586685180664 + floats: 4.608852386474609 floats: 0.0 floats: 0.0 - floats: 5.890446186065674 + floats: 3.734891891479492 floats: 0.0 floats: 0.0 - floats: -2.2357113361358643 - floats: 7.418643474578857 - floats: 7.022924423217773 - floats: -2.456209421157837 - floats: -2.7438905239105225 floats: 0.0 + floats: -0.10917384922504425 + floats: -1.2374401092529297 floats: 0.0 + floats: -0.827874481678009 floats: 0.0 - floats: 7.2257399559021 floats: 0.0 + floats: -2.071647882461548 + floats: -2.102121114730835 floats: 0.0 - floats: 7.698291778564453 + floats: -2.0763232707977295 floats: 0.0 floats: 0.0 + floats: 0.6638698577880859 + floats: -0.8181525468826294 floats: 0.0 - floats: 0.8175256252288818 - floats: -0.7507614493370056 - floats: 4.318005084991455 floats: 0.0 - floats: 6.969293594360352 - floats: -1.5351403951644897 - floats: -1.659111738204956 + floats: 0.10006757825613022 floats: 0.0 floats: 0.0 + floats: -6.082978248596191 floats: 0.0 + floats: -4.585964202880859 + floats: 9.586073875427246 + floats: -4.862358570098877 + floats: 7.337919235229492 floats: 0.0 floats: 0.0 - floats: 9.485491752624512 - floats: 8.976116180419922 - floats: 8.002398490905762 - floats: 6.623079299926758 - floats: 9.125885009765625 floats: 0.0 floats: 0.0 - floats: 7.444906711578369 + floats: -3.677865743637085 + floats: 7.8631792068481445 + floats: -3.906221628189087 floats: 0.0 floats: 0.0 - floats: 8.453536033630371 - floats: 8.412899017333984 + floats: -4.509670257568359 floats: 0.0 floats: 0.0 + floats: -2.576967477798462 + floats: 8.915074348449707 floats: 0.0 floats: 0.0 - floats: 7.705773830413818 - floats: 7.476274013519287 - floats: 4.589108467102051 floats: 0.0 - floats: 10.508894920349121 + floats: 2.103733777999878 + floats: 0.5658510327339172 + floats: 1.2207704782485962 + floats: 0.022815637290477753 floats: 0.0 floats: 0.0 + floats: 1.3473073244094849 floats: 0.0 - floats: 8.615400314331055 - floats: 10.795989990234375 - floats: 8.093504905700684 floats: 0.0 + floats: 0.9728590846061707 floats: 0.0 + floats: 0.4070947468280792 floats: 0.0 - floats: 8.830120086669922 - floats: 10.28431224822998 floats: 0.0 - floats: 0.0 - floats: 8.931217193603516 + floats: 2.6099328994750977 floats: 0.0 floats: 0.0 type: FLOATS @@ -2965,79 +2836,47 @@ ONNX ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { name: "target_nodeids" - ints: 7 + ints: 4 ints: 8 - ints: 10 + ints: 9 ints: 11 - ints: 14 - ints: 15 - ints: 17 + 
ints: 12 + ints: 13 + ints: 16 ints: 18 - ints: 20 - ints: 21 - ints: 26 - ints: 27 + ints: 19 + ints: 22 + ints: 24 + ints: 25 + ints: 28 ints: 29 - ints: 30 - ints: 33 + ints: 31 + ints: 32 ints: 34 - ints: 36 - ints: 37 ints: 39 ints: 40 + ints: 41 + ints: 42 ints: 46 ints: 47 - ints: 48 + ints: 49 ints: 50 - ints: 51 ints: 53 ints: 54 ints: 55 - ints: 59 + ints: 60 + ints: 61 ints: 63 ints: 64 - ints: 65 ints: 66 - ints: 67 - ints: 73 - ints: 74 - ints: 76 - ints: 77 - ints: 80 - ints: 81 - ints: 82 - ints: 83 - ints: 87 - ints: 89 - ints: 90 - ints: 91 - ints: 95 - ints: 96 - ints: 97 - ints: 100 - ints: 101 - ints: 103 - ints: 104 + ints: 68 + ints: 69 + ints: 71 + ints: 72 type: INTS } attribute { @@ -3079,228 +2918,104 @@ ONNX ints: 0 ints: 0 ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 - ints: 0 type: INTS } attribute { name: "target_weights" - floats: 7.0 + floats: 4.0 floats: 8.0 - floats: 10.0 + floats: 9.0 floats: 11.0 - floats: 14.0 - floats: 15.0 - floats: 17.0 + floats: 12.0 + floats: 13.0 + floats: 16.0 floats: 18.0 - floats: 20.0 - floats: 21.0 - floats: 26.0 - floats: 27.0 + floats: 19.0 + floats: 22.0 + floats: 24.0 + floats: 25.0 + floats: 28.0 floats: 29.0 - floats: 30.0 - floats: 33.0 + floats: 31.0 + floats: 32.0 floats: 34.0 - floats: 36.0 - floats: 37.0 floats: 39.0 floats: 40.0 + floats: 41.0 + floats: 42.0 floats: 46.0 floats: 47.0 - floats: 48.0 + floats: 49.0 floats: 50.0 - floats: 51.0 floats: 53.0 floats: 54.0 floats: 55.0 - floats: 59.0 + floats: 60.0 + floats: 61.0 floats: 63.0 floats: 64.0 - floats: 65.0 floats: 66.0 - floats: 67.0 - floats: 73.0 - floats: 74.0 - floats: 76.0 - floats: 77.0 - floats: 80.0 - floats: 81.0 - floats: 82.0 - floats: 83.0 - floats: 87.0 - floats: 89.0 - floats: 90.0 - floats: 91.0 - floats: 95.0 - floats: 96.0 - floats: 97.0 - floats: 100.0 - floats: 101.0 - floats: 103.0 - floats: 104.0 + floats: 68.0 + floats: 69.0 + floats: 71.0 + floats: 72.0 type: FLOATS } domain: "ai.onnx.ml" } node { - input: "path_length0_output0" - output: "path_length0_Y0" - name: "path_length0_LabelEncoder" + input: "node_sample0_Y0" + output: "path_length0_output0" + name: "path_length0_Cast" + op_type: "Cast" + attribute { + name: "to" + i: 7 + type: INT + } + domain: "" + } + node { + input: "node_sample0_output02" + output: "node_sample0_Y02" + name: "node_sample0_LabelEncoder" op_type: "LabelEncoder" attribute { name: "keys_int64s" - ints: 4 - ints: 5 + ints: 3 + ints: 6 + ints: 8 + ints: 9 ints: 10 - ints: 11 - ints: 13 ints: 14 - ints: 15 - ints: 17 - ints: 20 - ints: 21 + ints: 16 + ints: 18 + ints: 19 ints: 23 ints: 24 - ints: 28 - ints: 29 + ints: 26 + ints: 27 + ints: 30 ints: 31 + ints: 33 ints: 34 - ints: 35 - ints: 37 - ints: 38 + ints: 39 + ints: 40 + ints: 42 ints: 43 ints: 44 - ints: 45 - ints: 46 ints: 48 + ints: 49 + ints: 50 ints: 51 - ints: 52 - ints: 53 + ints: 54 ints: 55 - ints: 60 - ints: 61 - ints: 64 - ints: 65 - ints: 66 - ints: 69 - ints: 70 - ints: 71 - ints: 72 + ints: 57 + ints: 58 type: INTS } attribute { name: "values_floats" - floats: 5.0 - floats: 5.0 - floats: 8.0 - floats: 8.0 - floats: 8.0 - floats: 8.0 - floats: 6.0 - floats: 6.0 - floats: 8.0 - floats: 8.0 - floats: 8.0 - floats: 8.0 - floats: 6.0 - floats: 6.0 - floats: 6.0 - floats: 8.0 - floats: 8.0 - floats: 8.0 - floats: 8.0 - floats: 8.0 - floats: 8.0 - floats: 7.0 - floats: 6.0 - floats: 6.0 - floats: 8.0 - floats: 8.0 - floats: 7.0 - floats: 
3.0 - floats: 7.0 - floats: 7.0 - floats: 8.0 - floats: 8.0 - floats: 7.0 - floats: 7.0 - floats: 7.0 - floats: 6.0 - floats: 4.0 - type: FLOATS - } - domain: "ai.onnx.ml" - } - node { - input: "node_sample0_output02" - output: "node_sample0_Y02" - name: "node_sample0_LabelEncoder" - op_type: "LabelEncoder" - attribute { - name: "keys_int64s" - ints: 4 - ints: 5 - ints: 10 - ints: 11 - ints: 13 - ints: 14 - ints: 15 - ints: 17 - ints: 20 - ints: 21 - ints: 23 - ints: 24 - ints: 28 - ints: 29 - ints: 31 - ints: 34 - ints: 35 - ints: 37 - ints: 38 - ints: 43 - ints: 44 - ints: 45 - ints: 46 - ints: 48 - ints: 51 - ints: 52 - ints: 53 - ints: 55 - ints: 60 - ints: 61 - ints: 64 - ints: 65 - ints: 66 - ints: 69 - ints: 70 - ints: 71 - ints: 72 - type: INTS - } - attribute { - name: "values_floats" - floats: 1.0 - floats: 1.0 - floats: 12.0 - floats: 15.0 - floats: 21.0 - floats: 10.0 - floats: 1.0 - floats: 1.0 floats: 1.0 floats: 1.0 floats: 1.0 @@ -3308,22 +3023,23 @@ ONNX floats: 1.0 floats: 1.0 floats: 1.0 - floats: 7.0 + floats: 27.0 + floats: 2.0 + floats: 4.0 floats: 1.0 floats: 3.0 floats: 1.0 floats: 1.0 + floats: 3.0 + floats: 8.0 + floats: 10.0 + floats: 5.0 + floats: 3.0 floats: 1.0 floats: 1.0 floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 2.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 + floats: 5.0 + floats: 11.0 floats: 1.0 floats: 1.0 floats: 1.0 @@ -3335,9 +3051,9 @@ ONNX domain: "ai.onnx.ml" } node { - input: "node_sample1_Y0" - output: "path_length1_output0" - name: "path_length1_Cast" + input: "node_sample2_Y0" + output: "path_length2_output0" + name: "path_length2_Cast" op_type: "Cast" attribute { name: "to" @@ -3360,8 +3076,8 @@ ONNX } node { input: "node_sample2_Y0" - output: "path_length2_output0" - name: "path_length2_Cast" + output: "node_sample2_output02" + name: "node_sample2_Cast" op_type: "Cast" attribute { name: "to" @@ -3371,9 +3087,9 @@ ONNX domain: "" } node { - input: "node_sample2_Y0" - output: "node_sample2_output02" - name: "node_sample2_Cast" + input: "node_sample1_Y0" + output: "path_length1_output0" + name: "path_length1_Cast" op_type: "Cast" attribute { name: "to" @@ -3383,23 +3099,85 @@ ONNX domain: "" } node { - input: "node_sample0_Y02" - input: "path_length0_Reshapecst" - output: "node_sample0_reshaped0" - name: "node_sample0_Reshape" - op_type: "Reshape" + input: "path_length0_output0" + output: "path_length0_Y0" + name: "path_length0_LabelEncoder" + op_type: "LabelEncoder" attribute { - name: "allowzero" - i: 0 - type: INT + name: "keys_int64s" + ints: 3 + ints: 6 + ints: 8 + ints: 9 + ints: 10 + ints: 14 + ints: 16 + ints: 18 + ints: 19 + ints: 23 + ints: 24 + ints: 26 + ints: 27 + ints: 30 + ints: 31 + ints: 33 + ints: 34 + ints: 39 + ints: 40 + ints: 42 + ints: 43 + ints: 44 + ints: 48 + ints: 49 + ints: 50 + ints: 51 + ints: 54 + ints: 55 + ints: 57 + ints: 58 + type: INTS } - domain: "" + attribute { + name: "values_floats" + floats: 4.0 + floats: 6.0 + floats: 7.0 + floats: 7.0 + floats: 5.0 + floats: 6.0 + floats: 7.0 + floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 6.0 + floats: 8.0 + floats: 8.0 + floats: 7.0 + floats: 6.0 + floats: 4.0 + floats: 4.0 + floats: 4.0 + floats: 4.0 + type: FLOATS + } + domain: "ai.onnx.ml" } node { - input: "path_length0_Y0" + input: "node_sample0_Y02" input: "path_length0_Reshapecst" - output: "path_length0_reshaped0" - 
name: "path_length0_Reshape" + output: "node_sample0_reshaped0" + name: "node_sample0_Reshape" op_type: "Reshape" attribute { name: "allowzero" @@ -3416,40 +3194,40 @@ ONNX attribute { name: "keys_int64s" ints: 4 - ints: 5 ints: 8 + ints: 9 ints: 11 ints: 12 - ints: 14 - ints: 15 + ints: 13 + ints: 16 + ints: 18 ints: 19 - ints: 20 ints: 22 - ints: 23 - ints: 26 - ints: 27 + ints: 24 + ints: 25 ints: 28 ints: 29 + ints: 31 ints: 32 - ints: 35 - ints: 38 + ints: 34 ints: 39 + ints: 40 ints: 41 ints: 42 - ints: 44 ints: 46 ints: 47 + ints: 49 + ints: 50 ints: 53 ints: 54 - ints: 56 - ints: 57 - ints: 59 + ints: 55 + ints: 60 ints: 61 - ints: 62 + ints: 63 ints: 64 - ints: 65 - ints: 67 - ints: 70 + ints: 66 + ints: 68 + ints: 69 ints: 71 ints: 72 type: INTS @@ -3457,125 +3235,155 @@ ONNX attribute { name: "values_floats" floats: 5.0 - floats: 5.0 - floats: 6.0 floats: 8.0 floats: 8.0 floats: 8.0 floats: 8.0 + floats: 6.0 + floats: 6.0 + floats: 7.0 + floats: 7.0 + floats: 7.0 floats: 8.0 floats: 8.0 floats: 8.0 floats: 8.0 floats: 8.0 floats: 8.0 - floats: 7.0 - floats: 3.0 floats: 4.0 - floats: 6.0 - floats: 8.0 - floats: 8.0 floats: 8.0 floats: 8.0 - floats: 6.0 - floats: 7.0 floats: 7.0 + floats: 6.0 floats: 8.0 floats: 8.0 floats: 8.0 floats: 8.0 - floats: 7.0 floats: 8.0 floats: 8.0 + floats: 7.0 + floats: 6.0 + floats: 6.0 floats: 6.0 floats: 6.0 floats: 5.0 - floats: 7.0 - floats: 7.0 floats: 6.0 + floats: 6.0 + floats: 4.0 + floats: 4.0 type: FLOATS } domain: "ai.onnx.ml" } node { - input: "node_sample1_output02" - output: "node_sample1_Y02" - name: "node_sample1_LabelEncoder" + input: "node_sample2_output02" + output: "node_sample2_Y02" + name: "node_sample2_LabelEncoder" op_type: "LabelEncoder" attribute { name: "keys_int64s" - ints: 4 - ints: 5 + ints: 7 ints: 8 + ints: 10 ints: 11 - ints: 12 - ints: 14 + ints: 13 ints: 15 - ints: 19 + ints: 16 + ints: 18 ints: 20 - ints: 22 - ints: 23 - ints: 26 + ints: 21 + ints: 24 ints: 27 ints: 28 ints: 29 - ints: 32 - ints: 35 - ints: 38 - ints: 39 + ints: 33 + ints: 34 + ints: 36 + ints: 37 + ints: 40 ints: 41 - ints: 42 + ints: 43 ints: 44 - ints: 46 - ints: 47 - ints: 53 + ints: 50 + ints: 51 + ints: 52 ints: 54 ints: 56 ints: 57 - ints: 59 ints: 61 ints: 62 ints: 64 ints: 65 ints: 67 + ints: 68 ints: 70 ints: 71 - ints: 72 + ints: 73 + ints: 77 + ints: 80 + ints: 81 + ints: 82 + ints: 86 + ints: 87 + ints: 89 + ints: 90 + ints: 93 + ints: 94 + ints: 95 + ints: 98 + ints: 100 + ints: 101 + ints: 102 type: INTS } attribute { name: "values_floats" floats: 1.0 + floats: 2.0 + floats: 4.0 + floats: 4.0 floats: 1.0 + floats: 2.0 + floats: 3.0 floats: 1.0 floats: 1.0 floats: 1.0 - floats: 14.0 - floats: 7.0 floats: 1.0 - floats: 6.0 + floats: 2.0 + floats: 2.0 + floats: 1.0 + floats: 3.0 + floats: 3.0 + floats: 1.0 + floats: 1.0 + floats: 2.0 + floats: 1.0 floats: 1.0 floats: 1.0 - floats: 5.0 - floats: 24.0 + floats: 2.0 floats: 1.0 floats: 1.0 floats: 1.0 + floats: 4.0 floats: 1.0 floats: 2.0 - floats: 9.0 + floats: 7.0 + floats: 3.0 floats: 1.0 floats: 1.0 floats: 1.0 floats: 1.0 floats: 1.0 floats: 1.0 - floats: 2.0 floats: 1.0 - floats: 2.0 floats: 1.0 - floats: 2.0 + floats: 4.0 + floats: 1.0 floats: 1.0 + floats: 9.0 + floats: 1.0 + floats: 2.0 + floats: 7.0 floats: 1.0 floats: 1.0 floats: 1.0 @@ -3597,55 +3405,54 @@ ONNX ints: 8 ints: 10 ints: 11 - ints: 14 + ints: 13 ints: 15 - ints: 17 + ints: 16 ints: 18 ints: 20 ints: 21 - ints: 26 + ints: 24 ints: 27 + ints: 28 ints: 29 - ints: 30 ints: 33 ints: 34 ints: 36 ints: 37 - ints: 39 
ints: 40 - ints: 46 - ints: 47 - ints: 48 + ints: 41 + ints: 43 + ints: 44 ints: 50 ints: 51 - ints: 53 + ints: 52 ints: 54 - ints: 55 - ints: 59 - ints: 63 + ints: 56 + ints: 57 + ints: 61 + ints: 62 ints: 64 ints: 65 - ints: 66 ints: 67 + ints: 68 + ints: 70 + ints: 71 ints: 73 - ints: 74 - ints: 76 ints: 77 ints: 80 ints: 81 ints: 82 - ints: 83 + ints: 86 ints: 87 ints: 89 ints: 90 - ints: 91 + ints: 93 + ints: 94 ints: 95 - ints: 96 - ints: 97 + ints: 98 ints: 100 ints: 101 - ints: 103 - ints: 104 + ints: 102 type: INTS } attribute { @@ -3654,36 +3461,28 @@ ONNX floats: 8.0 floats: 8.0 floats: 8.0 - floats: 8.0 - floats: 8.0 + floats: 7.0 floats: 8.0 floats: 8.0 floats: 6.0 + floats: 7.0 + floats: 7.0 floats: 6.0 floats: 8.0 floats: 8.0 + floats: 7.0 floats: 8.0 floats: 8.0 floats: 8.0 floats: 8.0 floats: 8.0 floats: 8.0 - floats: 6.0 - floats: 6.0 floats: 8.0 floats: 8.0 - floats: 7.0 - floats: 7.0 - floats: 7.0 - floats: 6.0 - floats: 6.0 - floats: 4.0 - floats: 5.0 floats: 8.0 floats: 8.0 floats: 7.0 - floats: 6.0 - floats: 4.0 + floats: 7.0 floats: 8.0 floats: 8.0 floats: 8.0 @@ -3691,10 +3490,10 @@ ONNX floats: 8.0 floats: 8.0 floats: 7.0 - floats: 5.0 floats: 7.0 - floats: 8.0 - floats: 8.0 + floats: 5.0 + floats: 5.0 + floats: 3.0 floats: 6.0 floats: 8.0 floats: 8.0 @@ -3703,118 +3502,93 @@ ONNX floats: 8.0 floats: 8.0 floats: 8.0 + floats: 8.0 + floats: 8.0 + floats: 7.0 + floats: 6.0 + floats: 7.0 + floats: 7.0 + floats: 5.0 type: FLOATS } domain: "ai.onnx.ml" } node { - input: "node_sample2_output02" - output: "node_sample2_Y02" - name: "node_sample2_LabelEncoder" + input: "node_sample1_output02" + output: "node_sample1_Y02" + name: "node_sample1_LabelEncoder" op_type: "LabelEncoder" attribute { name: "keys_int64s" - ints: 7 + ints: 4 ints: 8 - ints: 10 + ints: 9 ints: 11 - ints: 14 - ints: 15 - ints: 17 + ints: 12 + ints: 13 + ints: 16 ints: 18 - ints: 20 - ints: 21 - ints: 26 - ints: 27 + ints: 19 + ints: 22 + ints: 24 + ints: 25 + ints: 28 ints: 29 - ints: 30 - ints: 33 + ints: 31 + ints: 32 ints: 34 - ints: 36 - ints: 37 ints: 39 ints: 40 + ints: 41 + ints: 42 ints: 46 ints: 47 - ints: 48 + ints: 49 ints: 50 - ints: 51 ints: 53 ints: 54 ints: 55 - ints: 59 + ints: 60 + ints: 61 ints: 63 ints: 64 - ints: 65 ints: 66 - ints: 67 - ints: 73 - ints: 74 - ints: 76 - ints: 77 - ints: 80 - ints: 81 - ints: 82 - ints: 83 - ints: 87 - ints: 89 - ints: 90 - ints: 91 - ints: 95 - ints: 96 - ints: 97 - ints: 100 - ints: 101 - ints: 103 - ints: 104 + ints: 68 + ints: 69 + ints: 71 + ints: 72 type: INTS } attribute { name: "values_floats" floats: 1.0 + floats: 4.0 floats: 1.0 floats: 1.0 - floats: 2.0 - floats: 1.0 - floats: 2.0 - floats: 3.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 2.0 - floats: 2.0 - floats: 1.0 - floats: 3.0 - floats: 9.0 - floats: 6.0 - floats: 8.0 - floats: 2.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 - floats: 1.0 + floats: 7.0 floats: 1.0 floats: 1.0 floats: 1.0 floats: 1.0 floats: 1.0 floats: 1.0 - floats: 4.0 floats: 1.0 floats: 1.0 + floats: 2.0 + floats: 32.0 floats: 1.0 floats: 1.0 floats: 3.0 - floats: 2.0 - floats: 1.0 - floats: 8.0 + floats: 9.0 floats: 1.0 floats: 1.0 + floats: 2.0 + floats: 2.0 floats: 1.0 + floats: 7.0 + floats: 5.0 floats: 1.0 floats: 1.0 - floats: 6.0 floats: 1.0 floats: 1.0 floats: 1.0 @@ -3829,26 +3603,10 @@ ONNX domain: "ai.onnx.ml" } node { - input: "node_sample0_reshaped0" - input: "dec_Powcst" - output: "eq2_0_C0" - name: "eq2_0_Equal" - op_type: "Equal" - domain: "" - } - node { - input: 
"node_sample0_reshaped0" - input: "dec_Powcst" - output: "plus2_0_C0" - name: "plus2_0_Greater" - op_type: "Greater" - domain: "" - } - node { - input: "path_length1_Y0" + input: "path_length0_Y0" input: "path_length0_Reshapecst" - output: "path_length1_reshaped0" - name: "path_length1_Reshape" + output: "path_length0_reshaped0" + name: "path_length0_Reshape" op_type: "Reshape" attribute { name: "allowzero" @@ -3857,6 +3615,14 @@ ONNX } domain: "" } + node { + input: "node_sample0_reshaped0" + input: "dec_Powcst" + output: "eq2_0_C0" + name: "eq2_0_Equal" + op_type: "Equal" + domain: "" + } node { input: "node_sample1_Y02" input: "path_length0_Reshapecst" @@ -3897,25 +3663,22 @@ ONNX domain: "" } node { - input: "plus2_0_C0" - output: "plus2_0_output0" - name: "plus2_0_Cast" - op_type: "Cast" - attribute { - name: "to" - i: 1 - type: INT - } + input: "node_sample0_reshaped0" + input: "dec_Powcst" + output: "plus2_0_C0" + name: "plus2_0_Greater" + op_type: "Greater" domain: "" } node { - input: "eq2_0_C0" - output: "eq2_0_output0" - name: "eq2_0_Cast" - op_type: "Cast" + input: "path_length1_Y0" + input: "path_length0_Reshapecst" + output: "path_length1_reshaped0" + name: "path_length1_Reshape" + op_type: "Reshape" attribute { - name: "to" - i: 1 + name: "allowzero" + i: 0 type: INT } domain: "" @@ -3929,11 +3692,27 @@ ONNX domain: "" } node { - input: "node_sample1_reshaped0" - input: "dec_Powcst" - output: "eq2_1_C0" - name: "eq2_1_Equal" - op_type: "Equal" + input: "plus2_0_C0" + output: "plus2_0_output0" + name: "plus2_0_Cast" + op_type: "Cast" + attribute { + name: "to" + i: 1 + type: INT + } + domain: "" + } + node { + input: "eq2_0_C0" + output: "eq2_0_output0" + name: "eq2_0_Cast" + op_type: "Cast" + attribute { + name: "to" + i: 1 + type: INT + } domain: "" } node { @@ -3953,11 +3732,11 @@ ONNX domain: "" } node { - input: "plus2_0_output0" - input: "node_sample0_reshaped0" - output: "eqp2ps0_C0" - name: "eqp2ps0_Mul" - op_type: "Mul" + input: "node_sample1_reshaped0" + input: "dec_Powcst" + output: "eq2_1_C0" + name: "eq2_1_Equal" + op_type: "Equal" domain: "" } node { @@ -3972,6 +3751,14 @@ ONNX } domain: "" } + node { + input: "plus2_0_output0" + input: "node_sample0_reshaped0" + output: "eqp2ps0_C0" + name: "eqp2ps0_Mul" + op_type: "Mul" + domain: "" + } node { input: "eq2_1_C0" output: "eq2_1_output0" @@ -4008,22 +3795,6 @@ ONNX } domain: "" } - node { - input: "eqp2ps0_C0" - input: "eqp2p_m1_0_Addcst" - output: "eqp2p_m1_0_C0" - name: "eqp2p_m1_0_Add" - op_type: "Add" - domain: "" - } - node { - input: "eqp2ps0_C0" - input: "eqp2p_m1_0_Maxcst" - output: "eqp_ns0_max0" - name: "eqp_ns0_Max" - op_type: "Max" - domain: "" - } node { input: "plus2_1_output0" input: "node_sample1_reshaped0" @@ -4032,6 +3803,14 @@ ONNX op_type: "Mul" domain: "" } + node { + input: "eqp2ps0_C0" + input: "eqp2p_m1_0_Addcst" + output: "eqp2p_m1_0_C0" + name: "eqp2p_m1_0_Add" + op_type: "Add" + domain: "" + } node { input: "plus2_2_output0" input: "node_sample2_reshaped0" @@ -4041,18 +3820,10 @@ ONNX domain: "" } node { - input: "eqp2p_m1_0_C0" - input: "eqp2p_m1_0_Maxcst1" - output: "eqp2p_m1_0_max02" - name: "eqp2p_m1_0_Max1" - op_type: "Max" - domain: "" - } - node { - input: "eqp2p_m1_0_C0" + input: "eqp2ps0_C0" input: "eqp2p_m1_0_Maxcst" - output: "eqp2p_m1_0_max0" - name: "eqp2p_m1_0_Max" + output: "eqp_ns0_max0" + name: "eqp_ns0_Max" op_type: "Max" domain: "" } @@ -4089,18 +3860,19 @@ ONNX domain: "" } node { - input: "eqp2p_m1_0_max0" - output: "eqp_log0_output0" - name: "eqp_log0_Log" - 
op_type: "Log" + input: "eqp2p_m1_0_C0" + input: "eqp2p_m1_0_Maxcst" + output: "eqp2p_m1_0_max0" + name: "eqp2p_m1_0_Max" + op_type: "Max" domain: "" } node { - input: "eqp2p_m1_0_max02" - input: "eqp_ns0_max0" - output: "eqp_ns0_C01" - name: "eqp_ns0_Div" - op_type: "Div" + input: "eqp2p_m1_0_C0" + input: "eqp2p_m1_0_Maxcst1" + output: "eqp2p_m1_0_max02" + name: "eqp2p_m1_0_Max1" + op_type: "Max" domain: "" } node { @@ -4120,11 +3892,11 @@ ONNX domain: "" } node { - input: "eqp2p_m1_2_C0" - input: "eqp2p_m1_0_Maxcst" - output: "eqp2p_m1_2_max0" - name: "eqp2p_m1_2_Max" - op_type: "Max" + input: "eqp2p_m1_0_max02" + input: "eqp_ns0_max0" + output: "eqp_ns0_C01" + name: "eqp_ns0_Div" + op_type: "Div" domain: "" } node { @@ -4135,6 +3907,28 @@ ONNX op_type: "Max" domain: "" } + node { + input: "eqp2p_m1_2_C0" + input: "eqp2p_m1_0_Maxcst" + output: "eqp2p_m1_2_max0" + name: "eqp2p_m1_2_Max" + op_type: "Max" + domain: "" + } + node { + input: "eqp2p_m1_0_max0" + output: "eqp_log0_output0" + name: "eqp_log0_Log" + op_type: "Log" + domain: "" + } + node { + input: "eqp2p_m1_1_max0" + output: "eqp_log1_output0" + name: "eqp_log1_Log" + op_type: "Log" + domain: "" + } node { input: "eqp_log0_output0" input: "eqp_log0_Addcst" @@ -4160,9 +3954,9 @@ ONNX domain: "" } node { - input: "eqp2p_m1_1_max0" - output: "eqp_log1_output0" - name: "eqp_log1_Log" + input: "eqp2p_m1_2_max0" + output: "eqp_log2_output0" + name: "eqp_log2_Log" op_type: "Log" domain: "" } @@ -4174,21 +3968,6 @@ ONNX op_type: "Div" domain: "" } - node { - input: "eqp2p_m1_2_max0" - output: "eqp_log2_output0" - name: "eqp_log2_Log" - op_type: "Log" - domain: "" - } - node { - input: "eqp_log0_C01" - input: "dec_Powcst" - output: "eqp_log0_C0" - name: "eqp_log0_Mul" - op_type: "Mul" - domain: "" - } node { input: "eqp_log1_output0" input: "eqp_log0_Addcst" @@ -4197,14 +3976,6 @@ ONNX op_type: "Add" domain: "" } - node { - input: "eqp_ns1_C01" - input: "eqp_ns0_Mulcst" - output: "eqp_ns1_C0" - name: "eqp_ns1_Mul" - op_type: "Mul" - domain: "" - } node { input: "eqp_ns2_C01" input: "eqp_ns0_Mulcst" @@ -4222,18 +3993,18 @@ ONNX domain: "" } node { - input: "eqp_log0_C0" - input: "eqp_ns0_C0" - output: "avlog0_C01" - name: "avlog0_Add" - op_type: "Add" + input: "eqp_ns1_C01" + input: "eqp_ns0_Mulcst" + output: "eqp_ns1_C0" + name: "eqp_ns1_Mul" + op_type: "Mul" domain: "" } node { - input: "eqp_log1_C01" + input: "eqp_log0_C01" input: "dec_Powcst" - output: "eqp_log1_C0" - name: "eqp_log1_Mul" + output: "eqp_log0_C0" + name: "eqp_log0_Mul" op_type: "Mul" domain: "" } @@ -4246,18 +4017,18 @@ ONNX domain: "" } node { - input: "avlog0_C01" - input: "plus2_0_output0" - output: "avlog0_C0" - name: "avlog0_Mul" + input: "eqp_log1_C01" + input: "dec_Powcst" + output: "eqp_log1_C0" + name: "eqp_log1_Mul" op_type: "Mul" domain: "" } node { - input: "eqp_log1_C0" - input: "eqp_ns1_C0" - output: "avlog1_C01" - name: "avlog1_Add" + input: "eqp_log0_C0" + input: "eqp_ns0_C0" + output: "avlog0_C01" + name: "avlog0_Add" op_type: "Add" domain: "" } @@ -4269,6 +4040,22 @@ ONNX op_type: "Add" domain: "" } + node { + input: "eqp_log1_C0" + input: "eqp_ns1_C0" + output: "avlog1_C01" + name: "avlog1_Add" + op_type: "Add" + domain: "" + } + node { + input: "avlog0_C01" + input: "plus2_0_output0" + output: "avlog0_C0" + name: "avlog0_Mul" + op_type: "Mul" + domain: "" + } node { input: "avlog1_C01" input: "plus2_1_output0" @@ -4293,14 +4080,6 @@ ONNX op_type: "Mul" domain: "" } - node { - input: "path_length0_reshaped0" - input: "avpl0_C0" - output: "depth0_C01" 
- name: "depth0_Add" - op_type: "Add" - domain: "" - } node { input: "eq2_1_output0" input: "avlog1_C0" @@ -4310,18 +4089,18 @@ ONNX domain: "" } node { - input: "eq2_2_output0" - input: "avlog2_C0" - output: "avpl2_C0" - name: "avpl2_Add" + input: "path_length0_reshaped0" + input: "avpl0_C0" + output: "depth0_C01" + name: "depth0_Add" op_type: "Add" domain: "" } node { - input: "depth0_C01" - input: "eqp2p_m1_0_Addcst" - output: "depth0_C0" - name: "depth0_Add1" + input: "eq2_2_output0" + input: "avlog2_C0" + output: "avpl2_C0" + name: "avpl2_Add" op_type: "Add" domain: "" } @@ -4342,10 +4121,10 @@ ONNX domain: "" } node { - input: "depth1_C01" + input: "depth0_C01" input: "eqp2p_m1_0_Addcst" - output: "depth1_C0" - name: "depth1_Add1" + output: "depth0_C0" + name: "depth0_Add1" op_type: "Add" domain: "" } @@ -4357,6 +4136,14 @@ ONNX op_type: "Add" domain: "" } + node { + input: "depth1_C01" + input: "eqp2p_m1_0_Addcst" + output: "depth1_C0" + name: "depth1_Add1" + op_type: "Add" + domain: "" + } node { input: "depth0_C0" input: "depth1_C0" @@ -4562,25 +4349,25 @@ ONNX } } } - opset_import { - domain: "ai.onnx.ml" - version: 2 - } opset_import { domain: "" version: 15 } + opset_import { + domain: "ai.onnx.ml" + version: 2 + } -.. GENERATED FROM PYTHON SOURCE LINES 67-69 +.. GENERATED FROM PYTHON SOURCE LINES 64-66 The last line shows the opsets. Let's extract it. -.. GENERATED FROM PYTHON SOURCE LINES 69-74 +.. GENERATED FROM PYTHON SOURCE LINES 66-71 .. code-block:: default @@ -4595,22 +4382,20 @@ Let's extract it. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - domain: 'ai.onnx.ml', version: 2 domain: '', version: 15 + domain: 'ai.onnx.ml', version: 2 -.. GENERATED FROM PYTHON SOURCE LINES 75-77 +.. GENERATED FROM PYTHON SOURCE LINES 72-74 There are two opsets, one for standard operators, the other for machine learning operators. -.. GENERATED FROM PYTHON SOURCE LINES 79-84 +.. GENERATED FROM PYTHON SOURCE LINES 76-81 ONNX and opset ++++++++++++++ @@ -4618,7 +4403,7 @@ ONNX and opset The converter can convert a model to an older opset than the default one, from 1 to the last available one. -.. GENERATED FROM PYTHON SOURCE LINES 84-103 +.. GENERATED FROM PYTHON SOURCE LINES 81-100 .. code-block:: default @@ -4647,31 +4432,29 @@ than the default one, from 1 to the last available one. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - target: 6 --> {'': 6, 'ai.onnx.ml': 2} 91 + target: 6 --> {'ai.onnx.ml': 2, '': 6} 91 target: 7 --> {'': 7, 'ai.onnx.ml': 2} 91 - target: 8 --> {'': 8, 'ai.onnx.ml': 2} 91 - target: 9 --> {'': 9, 'ai.onnx.ml': 2} 91 + target: 8 --> {'ai.onnx.ml': 2, '': 8} 91 + target: 9 --> {'ai.onnx.ml': 2, '': 9} 91 target: 10 --> {'ai.onnx.ml': 2, '': 10} 91 target: 11 --> {'ai.onnx.ml': 2, '': 11} 91 target: 12 --> {'': 12, 'ai.onnx.ml': 2} 91 target: 13 --> {'': 13, 'ai.onnx.ml': 2} 91 target: 14 --> {'': 14, 'ai.onnx.ml': 2} 91 - target: 15 --> {'ai.onnx.ml': 2, '': 15} 91 - D:\github\onnx\sklearn-onnx\skl2onnx\common\_topology.py:1405: UserWarning: Parameter target_opset 16 > 15 is higher than the the latest tested version. - warnings.warn( - target: 16 error: RuntimeError("The model is using version 16 of domain '' not supported yet by this library. You need to specify target_opset={'': 15}.") - D:\github\onnx\sklearn-onnx\skl2onnx\common\_topology.py:1405: UserWarning: Parameter target_opset 17 > 15 is higher than the the latest tested version. 
+ target: 15 --> {'': 15, 'ai.onnx.ml': 2} 91 + target: 16 --> {'': 16, 'ai.onnx.ml': 2} 91 + target: 17 --> {'': 17, 'ai.onnx.ml': 2} 91 + target: 18 --> {'': 18, 'ai.onnx.ml': 2} 91 + /home/xadupre/github/sklearn-onnx/skl2onnx/common/_topology.py:1405: UserWarning: Parameter target_opset 19 > 18 is higher than the the latest tested version. warnings.warn( - target: 17 error: RuntimeError("The model is using version 17 of domain '' not supported yet by this library. You need to specify target_opset={'': 15}.") + target: 19 error: RuntimeError("The model is using version 19 of domain '' not supported yet by this library. You need to specify target_opset={'': 18}.") -.. GENERATED FROM PYTHON SOURCE LINES 104-115 +.. GENERATED FROM PYTHON SOURCE LINES 101-112 It shows that the model cannot be converted for opset below 5. Operator `Reshape {'': 9, 'ai.onnx.ml': 2} 91 + target: 9 --> {'ai.onnx.ml': 2, '': 9} 91 try target_opset: {'': 9, 'ai.onnx.ml': 3} - target: 9 --> {'': 9, 'ai.onnx.ml': 2} 91 + target: 9 --> {'ai.onnx.ml': 2, '': 9} 91 try target_opset: {'': 10, 'ai.onnx.ml': 1} target: 10 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.") try target_opset: {'': 10, 'ai.onnx.ml': 2} @@ -4751,29 +4532,37 @@ The previous example changed the opset of the main domain try target_opset: {'': 15, 'ai.onnx.ml': 1} target: 15 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.") try target_opset: {'': 15, 'ai.onnx.ml': 2} - target: 15 --> {'ai.onnx.ml': 2, '': 15} 91 + target: 15 --> {'': 15, 'ai.onnx.ml': 2} 91 try target_opset: {'': 15, 'ai.onnx.ml': 3} - target: 15 --> {'ai.onnx.ml': 2, '': 15} 91 + target: 15 --> {'': 15, 'ai.onnx.ml': 2} 91 try target_opset: {'': 16, 'ai.onnx.ml': 1} - D:\github\onnx\sklearn-onnx\skl2onnx\common\_topology.py:1405: UserWarning: Parameter target_opset 16 > 15 is higher than the the latest tested version. - warnings.warn( target: 16 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.") try target_opset: {'': 16, 'ai.onnx.ml': 2} - target: 16 error: RuntimeError("The model is using version 16 of domain '' not supported yet by this library. You need to specify target_opset={'': 15}.") + target: 16 --> {'': 16, 'ai.onnx.ml': 2} 91 try target_opset: {'': 16, 'ai.onnx.ml': 3} - D:\github\onnx\sklearn-onnx\skl2onnx\common\_topology.py:1405: UserWarning: Parameter target_opset 16 > 15 is higher than the the latest tested version. - warnings.warn( - target: 16 error: RuntimeError("The model is using version 16 of domain '' not supported yet by this library. You need to specify target_opset={'': 15}.") + target: 16 --> {'': 16, 'ai.onnx.ml': 2} 91 try target_opset: {'': 17, 'ai.onnx.ml': 1} - D:\github\onnx\sklearn-onnx\skl2onnx\common\_topology.py:1405: UserWarning: Parameter target_opset 17 > 15 is higher than the the latest tested version. - warnings.warn( target: 17 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.") try target_opset: {'': 17, 'ai.onnx.ml': 2} - target: 17 error: RuntimeError("The model is using version 17 of domain '' not supported yet by this library. You need to specify target_opset={'': 15}.") + target: 17 --> {'': 17, 'ai.onnx.ml': 2} 91 try target_opset: {'': 17, 'ai.onnx.ml': 3} - D:\github\onnx\sklearn-onnx\skl2onnx\common\_topology.py:1405: UserWarning: Parameter target_opset 17 > 15 is higher than the the latest tested version. 
+ target: 17 --> {'': 17, 'ai.onnx.ml': 2} 91 + try target_opset: {'': 18, 'ai.onnx.ml': 1} + target: 18 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.") + try target_opset: {'': 18, 'ai.onnx.ml': 2} + target: 18 --> {'': 18, 'ai.onnx.ml': 2} 91 + try target_opset: {'': 18, 'ai.onnx.ml': 3} + target: 18 --> {'': 18, 'ai.onnx.ml': 2} 91 + try target_opset: {'': 19, 'ai.onnx.ml': 1} + /home/xadupre/github/sklearn-onnx/skl2onnx/common/_topology.py:1405: UserWarning: Parameter target_opset 19 > 18 is higher than the the latest tested version. warnings.warn( - target: 17 error: RuntimeError("The model is using version 17 of domain '' not supported yet by this library. You need to specify target_opset={'': 15}.") + target: 19 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.") + try target_opset: {'': 19, 'ai.onnx.ml': 2} + target: 19 error: RuntimeError("The model is using version 19 of domain '' not supported yet by this library. You need to specify target_opset={'': 18}.") + try target_opset: {'': 19, 'ai.onnx.ml': 3} + /home/xadupre/github/sklearn-onnx/skl2onnx/common/_topology.py:1405: UserWarning: Parameter target_opset 19 > 18 is higher than the the latest tested version. + warnings.warn( + target: 19 error: RuntimeError("The model is using version 19 of domain '' not supported yet by this library. You need to specify target_opset={'': 18}.") @@ -4781,35 +4570,23 @@ The previous example changed the opset of the main domain .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 5.264 seconds) + **Total running time of the script:** ( 0 minutes 1.345 seconds) .. _sphx_glr_download_auto_tutorial_plot_cbegin_opset.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_cbegin_opset.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_cbegin_opset.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_cbegin_opset.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_cbegin_opset.ipynb ` + :download:`Download Jupyter notebook: plot_cbegin_opset.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_dbegin_options.rst.txt b/_sources/auto_tutorial/plot_dbegin_options.rst.txt index d0d2fab7d..3c6968e6b 100644 --- a/_sources/auto_tutorial/plot_dbegin_options.rst.txt +++ b/_sources/auto_tutorial/plot_dbegin_options.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_dbegin_options.py" +.. "auto_tutorial/plot_dbegin_options.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -31,10 +31,6 @@ runtime has an implementation for it. 
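To make the effect of that operator concrete, here is a minimal sketch with hypothetical names (``clr`` a fitted scikit-learn classifier, ``X_test`` its float features); with the default conversion the probability output goes through *ZipMap* and comes back as a list of dictionaries:

.. code-block:: python

    # Hypothetical sketch, not the exact code of this example.
    import numpy
    from onnxruntime import InferenceSession
    from skl2onnx import to_onnx

    onx = to_onnx(clr, X_test[:1].astype(numpy.float32))
    sess = InferenceSession(onx.SerializeToString())
    labels, probabilities = sess.run(None, {'X': X_test[:2].astype(numpy.float32)})
    # With ZipMap, probabilities is a list of dictionaries such as
    # {0: 0.97, 1: 0.02, 2: 0.01}, one entry per input row.
    print(probabilities[0])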
What if two different users need two different conversions for the same model? Let's see how this may be done. -.. contents:: :local: - - Option *zipmap* +++++++++++++++ @@ -93,7 +89,7 @@ There might be in the graph many classifiers, it is important to have a way to specify which classifier should keep its *ZipMap* and which is not. So it is possible to specify options by id. -.. GENERATED FROM PYTHON SOURCE LINES 79-105 +.. GENERATED FROM PYTHON SOURCE LINES 75-101 .. code-block:: default @@ -129,11 +125,9 @@ and which is not. So it is possible to specify options by id. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - D:\Program Files\Python\Python39\lib\site-packages\sklearn\linear_model\_logistic.py:444: ConvergenceWarning: lbfgs failed to converge (status=1): + /home/xadupre/github/scikit-learn/sklearn/linear_model/_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1): STOP: TOTAL NO. of ITERATIONS REACHED LIMIT. Increase the number of iterations (max_iter) or scale the data as shown in: @@ -142,7 +136,7 @@ and which is not. So it is possible to specify options by id. https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression n_iter_i = _check_optimize_result( OnnxInference(...) 
- def compiled_run(dict_inputs, yield_ops=None, context=None): + def compiled_run(dict_inputs, yield_ops=None, context=None, attributes=None): if yield_ops is not None: raise NotImplementedError('yields_ops should be None.') # inputs @@ -293,11 +283,11 @@ It is just better to use strings. -.. GENERATED FROM PYTHON SOURCE LINES 140-141 +.. GENERATED FROM PYTHON SOURCE LINES 136-137 Visually. -.. GENERATED FROM PYTHON SOURCE LINES 141-147 +.. GENERATED FROM PYTHON SOURCE LINES 137-143 .. code-block:: default @@ -319,7 +309,7 @@ Visually. -.. GENERATED FROM PYTHON SOURCE LINES 148-153 +.. GENERATED FROM PYTHON SOURCE LINES 144-149 Option in a pipeline ++++++++++++++++++++ @@ -327,7 +317,7 @@ Option in a pipeline In a pipeline, :epkg:`sklearn-onnx` uses the same name convention. -.. GENERATED FROM PYTHON SOURCE LINES 153-166 +.. GENERATED FROM PYTHON SOURCE LINES 149-162 .. code-block:: default @@ -350,12 +340,10 @@ name convention. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none OnnxInference(...) - def compiled_run(dict_inputs, yield_ops=None, context=None): + def compiled_run(dict_inputs, yield_ops=None, context=None, attributes=None): if yield_ops is not None: raise NotImplementedError('yields_ops should be None.') # init: Ad_Addcst (Ad_Addcst) @@ -375,11 +363,11 @@ name convention. -.. GENERATED FROM PYTHON SOURCE LINES 167-168 +.. GENERATED FROM PYTHON SOURCE LINES 163-164 Visually. -.. GENERATED FROM PYTHON SOURCE LINES 168-174 +.. GENERATED FROM PYTHON SOURCE LINES 164-170 .. code-block:: default @@ -401,7 +389,7 @@ Visually. -.. GENERATED FROM PYTHON SOURCE LINES 175-182 +.. GENERATED FROM PYTHON SOURCE LINES 171-178 Option *raw_scores* +++++++++++++++++++ @@ -411,7 +399,7 @@ returns probabilities by default. But many models compute unscaled *raw_scores*. First, with probabilities: -.. GENERATED FROM PYTHON SOURCE LINES 182-198 +.. GENERATED FROM PYTHON SOURCE LINES 178-194 .. code-block:: default @@ -437,11 +425,9 @@ First, with probabilities: .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - {'label': array([0, 0, 0, 0, 0], dtype=int64), 'probabilities': array([[0.88268626, 0.10948393, 0.00782984], + {'label': array([0, 0, 0, 0, 0]), 'probabilities': array([[0.88268626, 0.10948393, 0.00782984], [0.7944385 , 0.19728662, 0.00827491], [0.85557765, 0.13792053, 0.00650185], [0.8262804 , 0.16634221, 0.00737737], @@ -450,11 +436,11 @@ First, with probabilities: -.. GENERATED FROM PYTHON SOURCE LINES 199-200 +.. GENERATED FROM PYTHON SOURCE LINES 195-196 Then with raw scores: -.. GENERATED FROM PYTHON SOURCE LINES 200-208 +.. GENERATED FROM PYTHON SOURCE LINES 196-204 .. code-block:: default @@ -472,11 +458,9 @@ Then with raw scores: .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - {'label': array([0, 0, 0, 0, 0], dtype=int64), 'probabilities': array([[0.88268626, 0.10948393, 0.00782984], + {'label': array([0, 0, 0, 0, 0]), 'probabilities': array([[0.88268626, 0.10948393, 0.00782984], [0.7944385 , 0.19728662, 0.00827491], [0.85557765, 0.13792053, 0.00650185], [0.8262804 , 0.16634221, 0.00737737], @@ -485,13 +469,13 @@ Then with raw scores: -.. GENERATED FROM PYTHON SOURCE LINES 209-212 +.. GENERATED FROM PYTHON SOURCE LINES 205-208 It did not seem to work... We need to tell that applies on a specific part of the pipeline and not the whole pipeline. -.. GENERATED FROM PYTHON SOURCE LINES 212-220 +.. GENERATED FROM PYTHON SOURCE LINES 208-216 .. code-block:: default @@ -509,11 +493,9 @@ and not the whole pipeline. .. 
rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - {'label': array([0, 0, 0, 0, 0], dtype=int64), 'probabilities': array([[ 2.2707398 , 0.18354762, -2.4542873 ], + {'label': array([0, 0, 0, 0, 0]), 'probabilities': array([[ 2.2707398 , 0.18354762, -2.4542873 ], [ 1.9857951 , 0.5928172 , -2.5786123 ], [ 2.2349296 , 0.4098304 , -2.6447601 ], [ 2.1071343 , 0.5042473 , -2.6113818 ], @@ -522,12 +504,12 @@ and not the whole pipeline. -.. GENERATED FROM PYTHON SOURCE LINES 221-223 +.. GENERATED FROM PYTHON SOURCE LINES 217-219 There are negative values. That works. Strings are still easier to use. -.. GENERATED FROM PYTHON SOURCE LINES 223-232 +.. GENERATED FROM PYTHON SOURCE LINES 219-228 .. code-block:: default @@ -546,11 +528,9 @@ Strings are still easier to use. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - {'label': array([0, 0, 0, 0, 0], dtype=int64), 'probabilities': array([[ 2.2707398 , 0.18354762, -2.4542873 ], + {'label': array([0, 0, 0, 0, 0]), 'probabilities': array([[ 2.2707398 , 0.18354762, -2.4542873 ], [ 1.9857951 , 0.5928172 , -2.5786123 ], [ 2.2349296 , 0.4098304 , -2.6447601 ], [ 2.1071343 , 0.5042473 , -2.6113818 ], @@ -559,11 +539,11 @@ Strings are still easier to use. -.. GENERATED FROM PYTHON SOURCE LINES 233-234 +.. GENERATED FROM PYTHON SOURCE LINES 229-230 Negative figures. We still have raw scores. -.. GENERATED FROM PYTHON SOURCE LINES 236-241 +.. GENERATED FROM PYTHON SOURCE LINES 232-237 Option *decision_path* ++++++++++++++++++++++ @@ -571,7 +551,7 @@ Option *decision_path* *scikit-learn* implements a function to retrieve the decision path. It can be enabled by option *decision_path*. -.. GENERATED FROM PYTHON SOURCE LINES 241-253 +.. GENERATED FROM PYTHON SOURCE LINES 237-249 .. code-block:: default @@ -593,21 +573,19 @@ decision path. It can be enabled by option *decision_path*. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [[1 0 1 0 1 1 0 1 0 1] - [1 0 1 0 1 1 0 1 0 1]] + [[1 0 1 0 1 1 0 1 1 0] + [1 0 1 0 1 1 0 1 1 0]] -.. GENERATED FROM PYTHON SOURCE LINES 254-255 +.. GENERATED FROM PYTHON SOURCE LINES 250-251 The model produces 3 outputs. -.. GENERATED FROM PYTHON SOURCE LINES 255-258 +.. GENERATED FROM PYTHON SOURCE LINES 251-254 .. code-block:: default @@ -620,8 +598,6 @@ The model produces 3 outputs. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none ['label', 'probabilities', 'decision_path'] @@ -629,11 +605,11 @@ The model produces 3 outputs. -.. GENERATED FROM PYTHON SOURCE LINES 259-260 +.. GENERATED FROM PYTHON SOURCE LINES 255-256 Let's display the last one. -.. GENERATED FROM PYTHON SOURCE LINES 260-264 +.. GENERATED FROM PYTHON SOURCE LINES 256-260 .. code-block:: default @@ -647,17 +623,15 @@ Let's display the last one. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [['10101' '10101'] - ['10101' '10101']] + [['10101' '10110'] + ['10101' '10110']] -.. GENERATED FROM PYTHON SOURCE LINES 265-270 +.. GENERATED FROM PYTHON SOURCE LINES 261-266 List of available options +++++++++++++++++++++++++ @@ -665,7 +639,7 @@ List of available options Options are registered for every converted to detect any supported options while running the conversion. -.. GENERATED FROM PYTHON SOURCE LINES 270-283 +.. GENERATED FROM PYTHON SOURCE LINES 266-279 .. code-block:: default @@ -688,12 +662,14 @@ supported options while running the conversion. .. rst-class:: sphx-glr-script-out - Out: - .. 
code-block:: none + LgbmClassifier {'zipmap': [True, False], 'nocl': [True, False]} + LightGbmBooster {'cast': [True, False]} LightGbmLGBMClassifier {'nocl': [True, False], 'zipmap': [True, False, 'columns']} LightGbmLGBMRegressor {'split': None} + Skl2onnxTraceableCountVectorizer {'tokenexp': None, 'separators': None, 'nan': [True, False], 'keep_empty_string': [True, False]} + Skl2onnxTraceableTfidfVectorizer {'tokenexp': None, 'separators': None, 'nan': [True, False], 'keep_empty_string': [True, False]} AdaBoostClassifier {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]} BaggingClassifier {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]} BayesianGaussianMixture {'score_samples': [True, False]} @@ -732,7 +708,9 @@ supported options while running the conversion. MultiOutputClassifier {'nocl': [False, True], 'output_class_labels': [False, True], 'zipmap': [False, True]} MultinomialNB {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]} NearestNeighbors {'optim': [None, 'cdist']} + OneVsOneClassifier {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True]} OneVsRestClassifier {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]} + QuadraticDiscriminantAnalysis {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True]} RadiusNeighborsClassifier {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'raw_scores': [True, False], 'output_class_labels': [False, True], 'optim': [None, 'cdist']} RadiusNeighborsRegressor {'optim': [None, 'cdist']} RandomForestClassifier {'zipmap': [True, False, 'columns'], 'raw_scores': [True, False], 'nocl': [True, False], 'output_class_labels': [False, True], 'decision_path': [True, False], 'decision_leaf': [True, False]} @@ -745,8 +723,12 @@ supported options while running the conversion. TfidfTransformer {'nan': [True, False]} TfidfVectorizer {'tokenexp': None, 'separators': None, 'nan': [True, False], 'keep_empty_string': [True, False]} VotingClassifier {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]} - XGBoostXGBClassifier {'nocl': [True, False], 'zipmap': [True, False, 'columns']} - all options: ['decision_leaf', + WrappedLightGbmBoosterClassifier {'zipmap': [True, False], 'nocl': [True, False]} + XGBoostXGBClassifier {'zipmap': [True, False], 'raw_scores': [True, False], 'nocl': [True, False]} + fct_score_cdist_sum {'cdist': [None, 'single-node']} + all options: ['cast', + 'cdist', + 'decision_leaf', 'decision_path', 'div', 'gemm', @@ -770,35 +752,23 @@ supported options while running the conversion. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 1.810 seconds) + **Total running time of the script:** ( 0 minutes 1.322 seconds) .. _sphx_glr_download_auto_tutorial_plot_dbegin_options.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_dbegin_options.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. 
container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_dbegin_options.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_dbegin_options.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_dbegin_options.ipynb ` + :download:`Download Jupyter notebook: plot_dbegin_options.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_dbegin_options_list.rst.txt b/_sources/auto_tutorial/plot_dbegin_options_list.rst.txt index 8178f021c..93167c600 100644 --- a/_sources/auto_tutorial/plot_dbegin_options_list.rst.txt +++ b/_sources/auto_tutorial/plot_dbegin_options_list.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_dbegin_options_list.py" +.. "auto_tutorial/plot_dbegin_options_list.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -30,16 +30,13 @@ the converters do not change their behaviour, they fail if they use a black listed operator, a couple of them produces a different ONNX graph. -.. contents:: - :local: - GaussianMixture +++++++++++++++ The first converter to change its behaviour depending on a black list of operators is for model *GaussianMixture*. -.. GENERATED FROM PYTHON SOURCE LINES 25-40 +.. GENERATED FROM PYTHON SOURCE LINES 22-37 .. code-block:: default @@ -66,17 +63,17 @@ of operators is for model *GaussianMixture*. .. raw:: html
-
GaussianMixture()
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
+
GaussianMixture()
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.


-.. GENERATED FROM PYTHON SOURCE LINES 41-43 +.. GENERATED FROM PYTHON SOURCE LINES 38-40 Default conversion ++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 43-55 +.. GENERATED FROM PYTHON SOURCE LINES 40-52 .. code-block:: default @@ -98,25 +95,23 @@ Default conversion .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [-1.11113176 -2.15611455 -2.44624895 -2.01165533 -3.18516454] - [[-1.1111317] - [-2.156114 ] - [-2.446249 ] - [-2.0116558] - [-3.1851645]] + [-1.20177945 -3.55755314 -1.50589005 -5.95852584 -2.41493749] + [[-1.2017796] + [-3.5575526] + [-1.5058892] + [-5.9585238] + [-2.4149387]] -.. GENERATED FROM PYTHON SOURCE LINES 56-57 +.. GENERATED FROM PYTHON SOURCE LINES 53-54 Display the ONNX graph. -.. GENERATED FROM PYTHON SOURCE LINES 57-64 +.. GENERATED FROM PYTHON SOURCE LINES 54-61 .. code-block:: default @@ -139,7 +134,7 @@ Display the ONNX graph. -.. GENERATED FROM PYTHON SOURCE LINES 65-71 +.. GENERATED FROM PYTHON SOURCE LINES 62-68 Conversion without ReduceLogSumExp ++++++++++++++++++++++++++++++++++ @@ -148,7 +143,7 @@ Parameter *black_op* is used to tell the converter not to use this operator. Let's see what the converter produces in that case. -.. GENERATED FROM PYTHON SOURCE LINES 71-83 +.. GENERATED FROM PYTHON SOURCE LINES 68-80 .. code-block:: default @@ -170,25 +165,23 @@ produces in that case. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [-1.11113176 -2.15611455 -2.44624895 -2.01165533 -3.18516454] - [[-1.1111317] - [-2.156114 ] - [-2.446249 ] - [-2.0116558] - [-3.1851645]] + [-1.20177945 -3.55755314 -1.50589005 -5.95852584 -2.41493749] + [[-1.2017796] + [-3.5575526] + [-1.5058892] + [-5.958523 ] + [-2.4149382]] -.. GENERATED FROM PYTHON SOURCE LINES 84-85 +.. GENERATED FROM PYTHON SOURCE LINES 81-82 Display the ONNX graph. -.. GENERATED FROM PYTHON SOURCE LINES 85-92 +.. GENERATED FROM PYTHON SOURCE LINES 82-89 .. code-block:: default @@ -211,12 +204,12 @@ Display the ONNX graph. -.. GENERATED FROM PYTHON SOURCE LINES 93-95 +.. GENERATED FROM PYTHON SOURCE LINES 90-92 Processing time +++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 95-102 +.. GENERATED FROM PYTHON SOURCE LINES 92-99 .. code-block:: default @@ -233,21 +226,19 @@ Processing time .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - 0.3173113000000001 - 0.3591471000000013 + 0.23519209400001273 + 0.24321569299991097 -.. GENERATED FROM PYTHON SOURCE LINES 103-104 +.. GENERATED FROM PYTHON SOURCE LINES 100-101 The model using ReduceLogSumExp is much faster. -.. GENERATED FROM PYTHON SOURCE LINES 106-113 +.. GENERATED FROM PYTHON SOURCE LINES 103-110 If the converter cannot convert without... ++++++++++++++++++++++++++++++++++++++++++ @@ -257,7 +248,7 @@ of operators. If a converter fails to convert without using a blacklisted operator (or only whitelisted operators), *skl2onnx* raises an error. -.. GENERATED FROM PYTHON SOURCE LINES 113-122 +.. GENERATED FROM PYTHON SOURCE LINES 110-119 .. code-block:: default @@ -276,8 +267,6 @@ a blacklisted operator (or only whitelisted operators), .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Error: Operator 'Add' is black listed. @@ -288,35 +277,23 @@ a blacklisted operator (or only whitelisted operators), .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 1.509 seconds) + **Total running time of the script:** ( 0 minutes 1.110 seconds) .. _sphx_glr_download_auto_tutorial_plot_dbegin_options_list.py: +.. only:: html -.. only :: html - - .. 
container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_dbegin_options_list.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_dbegin_options_list.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_dbegin_options_list.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_dbegin_options_list.ipynb ` + :download:`Download Jupyter notebook: plot_dbegin_options_list.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_dbegin_options_zipmap.rst.txt b/_sources/auto_tutorial/plot_dbegin_options_zipmap.rst.txt index 7cc0c68ad..5d67d9d04 100644 --- a/_sources/auto_tutorial/plot_dbegin_options_zipmap.rst.txt +++ b/_sources/auto_tutorial/plot_dbegin_options_zipmap.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_dbegin_options_zipmap.py" +.. "auto_tutorial/plot_dbegin_options_zipmap.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -29,13 +29,10 @@ into a list of dictionaries where each probabily is mapped to its class id or name. That mechanism retains the class names but is slower. Let's see what other options are available. -.. contents:: - :local: - Train a model and convert it ++++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 23-47 +.. GENERATED FROM PYTHON SOURCE LINES 20-44 .. code-block:: default @@ -69,8 +66,6 @@ Train a model and convert it .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none LogisticRegression(max_iter=500) @@ -78,7 +73,7 @@ Train a model and convert it -.. GENERATED FROM PYTHON SOURCE LINES 48-53 +.. GENERATED FROM PYTHON SOURCE LINES 45-50 Default behaviour: zipmap=True ++++++++++++++++++++++++++++++ @@ -86,7 +81,7 @@ Default behaviour: zipmap=True The output type for the probabilities is a list of dictionaries. -.. GENERATED FROM PYTHON SOURCE LINES 53-60 +.. GENERATED FROM PYTHON SOURCE LINES 50-57 .. code-block:: default @@ -103,25 +98,23 @@ dictionaries. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [{10: 0.9601631164550781, 12: 0.039836231619119644, 14: 7.069097023304494e-07}, {10: 0.9455938339233398, 12: 0.054405875504016876, 14: 2.9202584528320585e-07}] + [{10: 0.02909434586763382, 12: 0.9069037437438965, 14: 0.06400192528963089}, {10: 0.001469946000725031, 12: 0.6960583329200745, 14: 0.30247175693511963}] probabilities type: type for the first observations: -.. GENERATED FROM PYTHON SOURCE LINES 61-65 +.. GENERATED FROM PYTHON SOURCE LINES 58-62 Option zipmap=False +++++++++++++++++++ Probabilities are now a matrix. -.. GENERATED FROM PYTHON SOURCE LINES 65-76 +.. GENERATED FROM PYTHON SOURCE LINES 62-73 .. code-block:: default @@ -142,19 +135,17 @@ Probabilities are now a matrix. .. 
rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [[9.6016312e-01 3.9836232e-02 7.0690970e-07] - [9.4559383e-01 5.4405876e-02 2.9202585e-07]] + [[0.02909435 0.90690374 0.06400193] + [0.00146995 0.69605833 0.30247176]] probabilities type: type for the first observations: -.. GENERATED FROM PYTHON SOURCE LINES 77-83 +.. GENERATED FROM PYTHON SOURCE LINES 74-80 Option zipmap='columns' +++++++++++++++++++++++ @@ -163,7 +154,7 @@ This options removes the final operator ZipMap and splits the probabilities into columns. The final model produces one output for the label, and one output per class. -.. GENERATED FROM PYTHON SOURCE LINES 83-94 +.. GENERATED FROM PYTHON SOURCE LINES 80-91 .. code-block:: default @@ -184,24 +175,22 @@ one output for the label, and one output per class. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - output: 'output_label' shape=(38,) values=[10 10]... - output: 'i10' shape=(38,) values=[0.9601631 0.94559383]... - output: 'i12' shape=(38,) values=[0.03983623 0.05440588]... - output: 'i14' shape=(38,) values=[7.0690970e-07 2.9202585e-07]... + output: 'output_label' shape=(38,) values=[12 12]... + output: 'i10' shape=(38,) values=[0.02909435 0.00146995]... + output: 'i12' shape=(38,) values=[0.90690374 0.69605833]... + output: 'i14' shape=(38,) values=[0.06400193 0.30247176]... -.. GENERATED FROM PYTHON SOURCE LINES 95-97 +.. GENERATED FROM PYTHON SOURCE LINES 92-94 Let's compare prediction time +++++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 97-117 +.. GENERATED FROM PYTHON SOURCE LINES 94-114 .. code-block:: default @@ -231,21 +220,19 @@ Let's compare prediction time .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Average time with ZipMap: - 0.005073529999998527 + 0.003929830000015499 Average time without ZipMap: - 0.0016709999999960702 + 0.0014273400999172737 Average time without ZipMap but with columns: - 0.0030088799999930414 + 0.002505180000025575 -.. GENERATED FROM PYTHON SOURCE LINES 118-125 +.. GENERATED FROM PYTHON SOURCE LINES 115-122 Option zimpap=False and output_class_labels=True ++++++++++++++++++++++++++++++++++++++++++++++++ @@ -255,7 +242,7 @@ much faster but labels are lost in the process. Option `output_class_labels` can be used to expose the labels as a third output. -.. GENERATED FROM PYTHON SOURCE LINES 125-136 +.. GENERATED FROM PYTHON SOURCE LINES 122-133 .. code-block:: default @@ -276,23 +263,21 @@ as a third output. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [[9.6016312e-01 3.9836232e-02 7.0690970e-07] - [9.4559383e-01 5.4405876e-02 2.9202585e-07]] + [[0.02909435 0.90690374 0.06400193] + [0.00146995 0.69605833 0.30247176]] probabilities type: class labels: [10 12 14] -.. GENERATED FROM PYTHON SOURCE LINES 137-138 +.. GENERATED FROM PYTHON SOURCE LINES 134-135 Processing time. -.. GENERATED FROM PYTHON SOURCE LINES 138-143 +.. GENERATED FROM PYTHON SOURCE LINES 135-140 .. code-block:: default @@ -307,17 +292,15 @@ Processing time. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Average time without ZipMap but with output_class_labels: - 0.0029111999999997806 + 0.002399530100001357 -.. GENERATED FROM PYTHON SOURCE LINES 144-151 +.. GENERATED FROM PYTHON SOURCE LINES 141-148 MultiOutputClassifier +++++++++++++++++++++ @@ -327,7 +310,7 @@ to predict. Instead of returning a matrix of probabilities, it returns a sequence of matrices. Let's first modify the labels to get a problem for a MultiOutputClassifier. -.. 
GENERATED FROM PYTHON SOURCE LINES 151-156 +.. GENERATED FROM PYTHON SOURCE LINES 148-153 .. code-block:: default @@ -342,8 +325,6 @@ a problem for a MultiOutputClassifier. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[ 10 1000] @@ -355,11 +336,11 @@ a problem for a MultiOutputClassifier. -.. GENERATED FROM PYTHON SOURCE LINES 157-158 +.. GENERATED FROM PYTHON SOURCE LINES 154-155 Let's train a MultiOutputClassifier. -.. GENERATED FROM PYTHON SOURCE LINES 158-170 +.. GENERATED FROM PYTHON SOURCE LINES 155-167 .. code-block:: default @@ -381,31 +362,29 @@ Let's train a MultiOutputClassifier. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none MultiOutputClassifier(estimator=LogisticRegression(max_iter=500)) - D:\github\onnx\sklearn-onnx\skl2onnx\_parse.py:528: UserWarning: Option zipmap is ignored for model . Set option zipmap to False to remove this message. + /home/xadupre/github/sklearn-onnx/skl2onnx/_parse.py:529: UserWarning: Option zipmap is ignored for model . Set option zipmap to False to remove this message. warnings.warn( - [array([[ 12, 112], - [ 14, 114], - [ 14, 114]], dtype=int64), [array([[1.7433858e-02, 9.2380190e-01, 5.8764238e-02], - [8.0360842e-05, 1.3341965e-01, 8.6649996e-01], - [9.2398236e-04, 3.4383032e-01, 6.5524566e-01]], dtype=float32), array([[1.44697949e-02, 5.88752866e-01, 1.22261584e-01, 2.74515748e-01], - [4.41888697e-04, 2.33344465e-01, 6.36498570e-01, 1.29715100e-01], - [2.21832423e-03, 2.89401174e-01, 4.72904593e-01, 2.35475823e-01]], + [array([[ 10, 110], + [ 10, 110], + [ 10, 110]], dtype=int64), [array([[9.5487159e-01, 4.5127936e-02, 4.7449998e-07], + [9.7273737e-01, 2.7262522e-02, 1.5831836e-07], + [9.6799171e-01, 3.2008070e-02, 2.2276080e-07]], dtype=float32), array([[7.2772932e-01, 8.8738047e-02, 1.0638156e-04, 1.8342626e-01], + [8.3462042e-01, 6.5695420e-02, 6.4909793e-05, 9.9619307e-02], + [7.9595613e-01, 6.4299725e-02, 6.8941692e-05, 1.3967523e-01]], dtype=float32)]] -.. GENERATED FROM PYTHON SOURCE LINES 171-173 +.. GENERATED FROM PYTHON SOURCE LINES 168-170 Option zipmap is ignored. Labels are missing but they can be added back as a third output. -.. GENERATED FROM PYTHON SOURCE LINES 173-184 +.. GENERATED FROM PYTHON SOURCE LINES 170-181 .. code-block:: default @@ -426,29 +405,27 @@ added back as a third output. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predicted labels [[ 12 112] - [ 14 114] - [ 14 114]] - predicted probabilies [array([[1.7433858e-02, 9.2380190e-01, 5.8764238e-02], - [8.0360842e-05, 1.3341965e-01, 8.6649996e-01], - [9.2398236e-04, 3.4383032e-01, 6.5524566e-01]], dtype=float32), array([[1.44697949e-02, 5.88752866e-01, 1.22261584e-01, 2.74515748e-01], - [4.41888697e-04, 2.33344465e-01, 6.36498570e-01, 1.29715100e-01], - [2.21832423e-03, 2.89401174e-01, 4.72904593e-01, 2.35475823e-01]], + predicted labels [[ 10 110] + [ 10 110] + [ 10 110]] + predicted probabilies [array([[9.5487159e-01, 4.5127936e-02, 4.7449998e-07], + [9.7273737e-01, 2.7262522e-02, 1.5831836e-07], + [9.6799171e-01, 3.2008070e-02, 2.2276080e-07]], dtype=float32), array([[7.2772932e-01, 8.8738047e-02, 1.0638156e-04, 1.8342626e-01], + [8.3462042e-01, 6.5695420e-02, 6.4909793e-05, 9.9619307e-02], + [7.9595613e-01, 6.4299725e-02, 6.8941692e-05, 1.3967523e-01]], dtype=float32)] class labels [array([10, 12, 14], dtype=int64), array([ 110, 112, 114, 1000], dtype=int64)] -.. GENERATED FROM PYTHON SOURCE LINES 185-186 +.. GENERATED FROM PYTHON SOURCE LINES 182-183 **Versions used for this example** -.. 
GENERATED FROM PYTHON SOURCE LINES 186-192 +.. GENERATED FROM PYTHON SOURCE LINES 183-189 .. code-block:: default @@ -464,15 +441,13 @@ added back as a third output. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - numpy: 1.21.3 - scikit-learn: 1.1.1 - onnx: 1.12.0 - onnxruntime: 1.10.0 - skl2onnx: 1.11.2 + numpy: 1.23.5 + scikit-learn: 1.3.dev0 + onnx: 1.14.0 + onnxruntime: 1.15.0+cpu + skl2onnx: 1.14.0 @@ -480,35 +455,23 @@ added back as a third output. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.382 seconds) + **Total running time of the script:** ( 0 minutes 0.267 seconds) .. _sphx_glr_download_auto_tutorial_plot_dbegin_options_zipmap.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_dbegin_options_zipmap.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_dbegin_options_zipmap.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_dbegin_options_zipmap.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_dbegin_options_zipmap.ipynb ` + :download:`Download Jupyter notebook: plot_dbegin_options_zipmap.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_ebegin_float_double.rst.txt b/_sources/auto_tutorial/plot_ebegin_float_double.rst.txt index c2f08422b..fa4c09421 100644 --- a/_sources/auto_tutorial/plot_ebegin_float_double.rst.txt +++ b/_sources/auto_tutorial/plot_ebegin_float_double.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_ebegin_float_double.py" +.. "auto_tutorial/plot_ebegin_float_double.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -47,9 +47,6 @@ even a small *dx* may introduce a huge discrepency. Let's look into an example which always produces discrepencies and some ways to overcome this situation. -.. contents:: - :local: - More into the issue +++++++++++++++++++ @@ -69,15 +66,13 @@ However, the probability that both comparisons give different results is not null. The following graph shows the discord areas. -.. GENERATED FROM PYTHON SOURCE LINES 55-108 +.. GENERATED FROM PYTHON SOURCE LINES 52-103 .. code-block:: default from mlprodict.sklapi import OnnxPipeline - from skl2onnx.sklapi import CastTransformer, CastRegressor + from skl2onnx.sklapi import CastTransformer from skl2onnx import to_onnx - from mlprodict.onnx_conv import to_onnx as to_onnx_extended - from mlprodict.onnxrt import OnnxInference from onnxruntime import InferenceSession from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeRegressor @@ -137,16 +132,14 @@ the discord areas. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - + -.. 
GENERATED FROM PYTHON SOURCE LINES 109-116 +.. GENERATED FROM PYTHON SOURCE LINES 104-111 The pipeline and the data +++++++++++++++++++++++++ @@ -156,7 +149,7 @@ does many comparisons in this discord area. This is done by rounding features to integers, a frequent case happening when dealing with categorical features. -.. GENERATED FROM PYTHON SOURCE LINES 116-136 +.. GENERATED FROM PYTHON SOURCE LINES 111-131 .. code-block:: default @@ -188,14 +181,14 @@ happening when dealing with categorical features. .. raw:: html
-
Pipeline(steps=[('scaler', StandardScaler()),
-                    ('dt', DecisionTreeRegressor(max_depth=10))])
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
+
Pipeline(steps=[('scaler', StandardScaler()),
+                    ('dt', DecisionTreeRegressor(max_depth=10))])
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.


-.. GENERATED FROM PYTHON SOURCE LINES 137-143 +.. GENERATED FROM PYTHON SOURCE LINES 132-138 The discrepencies +++++++++++++++++ @@ -204,7 +197,7 @@ Let's reuse the function implemented in the first example :ref:`l-diff-dicrepencies` and look into the conversion. -.. GENERATED FROM PYTHON SOURCE LINES 143-163 +.. GENERATED FROM PYTHON SOURCE LINES 138-159 .. code-block:: default @@ -217,7 +210,8 @@ look into the conversion. return d.max(), (d / numpy.abs(p1)).max() - onx = to_onnx(model, Xi_train[:1].astype(numpy.float32)) + onx = to_onnx(model, Xi_train[:1].astype(numpy.float32), + target_opset=15) sess = InferenceSession(onx.SerializeToString()) @@ -234,16 +228,14 @@ look into the conversion. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (230.44514830504892, 5.202494497892177) + (191.7381674075899, 3.336255213062701) -.. GENERATED FROM PYTHON SOURCE LINES 164-197 +.. GENERATED FROM PYTHON SOURCE LINES 160-193 The discrepencies are significant. The ONNX model keeps float at every step. @@ -279,7 +271,7 @@ float in the :epkg:`scikit-learn` pipeline. } -.. GENERATED FROM PYTHON SOURCE LINES 197-207 +.. GENERATED FROM PYTHON SOURCE LINES 193-203 .. code-block:: default @@ -301,23 +293,24 @@ float in the :epkg:`scikit-learn` pipeline. .. raw:: html
-
Pipeline(steps=[('scaler', StandardScaler()), ('cast', CastTransformer()),
-                    ('dt', DecisionTreeRegressor(max_depth=10))])
+
Pipeline(steps=[('scaler', StandardScaler()), ('cast', CastTransformer()),
+                    ('dt', DecisionTreeRegressor(max_depth=10))])


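The construction of *model2* is collapsed in this diff; the following sketch
rebuilds it from the repr displayed just above and is not copied from the
original script (the fit call on *Xi_train*, *yi_train* is assumed to match
the rest of the example).

.. code-block:: default

    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.tree import DecisionTreeRegressor
    from skl2onnx.sklapi import CastTransformer

    model2 = Pipeline(steps=[
        ('scaler', StandardScaler()),
        ('cast', CastTransformer()),   # casts the scaled features to float32
        ('dt', DecisionTreeRegressor(max_depth=10)),
    ])
    model2.fit(Xi_train, yi_train)
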
-.. GENERATED FROM PYTHON SOURCE LINES 208-209 +.. GENERATED FROM PYTHON SOURCE LINES 204-205 The discrepencies. -.. GENERATED FROM PYTHON SOURCE LINES 209-219 +.. GENERATED FROM PYTHON SOURCE LINES 205-216 .. code-block:: default - onx2 = to_onnx(model2, Xi_train[:1].astype(numpy.float32)) + onx2 = to_onnx(model2, Xi_train[:1].astype(numpy.float32), + target_opset=15) sess2 = InferenceSession(onx2.SerializeToString()) @@ -332,16 +325,14 @@ The discrepencies. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (230.44514830504892, 5.202494497892176) + (191.7381674075899, 3.336255213062701) -.. GENERATED FROM PYTHON SOURCE LINES 220-225 +.. GENERATED FROM PYTHON SOURCE LINES 217-222 That still fails because the normalizer in :epkg:`scikit-learn` and in :epkg:`ONNX` @@ -349,7 +340,7 @@ use different types. The cast still happens and the *dx* is still here. To remove it, we need to use double in ONNX normalizer. -.. GENERATED FROM PYTHON SOURCE LINES 225-244 +.. GENERATED FROM PYTHON SOURCE LINES 222-242 .. code-block:: default @@ -363,7 +354,8 @@ double in ONNX normalizer. model3.fit(Xi_train, yi_train) onx3 = to_onnx(model3, Xi_train[:1].astype(numpy.float32), - options={StandardScaler: {'div': 'div_cast'}}) + options={StandardScaler: {'div': 'div_cast'}}, + target_opset=15) sess3 = InferenceSession(onx3.SerializeToString()) @@ -378,16 +370,14 @@ double in ONNX normalizer. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (3.0013221589797467e-05, 5.45871186188065e-08) + (1.884853452338575e-05, 5.703614863001166e-08) -.. GENERATED FROM PYTHON SOURCE LINES 245-268 +.. GENERATED FROM PYTHON SOURCE LINES 243-266 It works. That also means that it is difficult to change the computation type when a pipeline includes a discontinuous @@ -413,7 +403,7 @@ is trained based on ONNX output. It is implemented in class :epkg:`OnnxPipeline`. -.. GENERATED FROM PYTHON SOURCE LINES 268-277 +.. GENERATED FROM PYTHON SOURCE LINES 266-275 .. code-block:: default @@ -434,30 +424,28 @@ class :epkg:`OnnxPipeline`. .. raw:: html
-
OnnxPipeline(steps=[('scaler',
-                         OnnxTransformer(onnx_bytes=b'\x08\x08\x12\x08skl2onnx\x1a\x061.11.2"\x07ai.onnx(\x002\x00:\xf6\x01\n\xa6\x01\n\x01X\x12\x08variable\x1a\x06Scaler"\x06Scaler*=\n\x06offset=o\x12\x83\xbb=\x9f\x86\x02\xbd=\x7fE\x11\xbd=P\xd7\xed\xbd=\xff\xd7\xa6\xbe= Aq\xbe=!\xfa\xc8==$\x03\x93\xbf=\xbd\xe3\xb4?= \xf7\x1a?\xa0\x01\x06*<\n\x...x950\x84==\xd65\x00==\x03Q\x81<=\x86\x99\xfe;=\xd7\xbb};=\xa3\x8a\xfe:\xa0\x01\x06:\nai.onnx.ml\x12\x1emlprodict_ONNX(StandardScaler)Z\x11\n\x01X\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\nb\x18\n\x08variable\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\nB\x0e\n\nai.onnx.ml\x10\x01B\x04\n\x00\x10\x0f')),
-                        ('dt', DecisionTreeRegressor(max_depth=10))])
+
OnnxPipeline(steps=[('scaler',
+                         OnnxTransformer(onnx_bytes=b'\x08\x08\x12\x08skl2onnx\x1a\x061.14.0"\x07ai.onnx(\x002\x00:\xf6\x01\n\xa6\x01\n\x01X\x12\x08variable\x1a\x06Scaler"\x06Scaler*=\n\x06offset=e\xcf\x8b\xb9=uLp<=\x7f\x8fg<=\x10\xe97\xbe=>\x9e\x03\xbe=\t@I?=b\xeb\xac\xbe=-!_\xc0=KY\x06\xbe=\xf0\xcc\x91\xbf\xa0\x01\x06*<\n\x05scale=xD\xb9?=\x...xa7\x91\x84==*\xf1\x03==s\x19\x80<=\x05\x92\xfe;=\x0e\x00\x81;=y4\x00;\xa0\x01\x06:\nai.onnx.ml\x12\x1emlprodict_ONNX(StandardScaler)Z\x11\n\x01X\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\nb\x18\n\x08variable\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\nB\x0e\n\nai.onnx.ml\x10\x01B\x04\n\x00\x10\x11')),
+                        ('dt', DecisionTreeRegressor(max_depth=10))])


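The code building *model_onx* is also collapsed here. Rebuilt from the repr
above (a sketch, not the original source): *OnnxPipeline* converts every
fitted step to ONNX before training the next one, which is why the fitted
object shows an *OnnxTransformer* holding the scaler's ONNX bytes instead of
the original *StandardScaler*.

.. code-block:: default

    from mlprodict.sklapi import OnnxPipeline

    model_onx = OnnxPipeline(steps=[
        ('scaler', StandardScaler()),
        ('dt', DecisionTreeRegressor(max_depth=10)),
    ])
    # after fit, the 'scaler' step is replaced by an OnnxTransformer so that
    # the tree is trained on the float32 output of the ONNX scaler
    model_onx.fit(Xi_train, yi_train)
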
-.. GENERATED FROM PYTHON SOURCE LINES 278-279 +.. GENERATED FROM PYTHON SOURCE LINES 276-279 -The conversion. +By using opset 17 and opset 3 for domain ai.onnx.ml, the tree thresholds +can be stored as double and not float anymore. That lowerss the discrepancies +even if the outputs are still float. -.. GENERATED FROM PYTHON SOURCE LINES 279-294 +.. GENERATED FROM PYTHON SOURCE LINES 279-289 .. code-block:: default - try: - onx4 = to_onnx(model_onx, Xi_train[:1].astype(numpy.float32)) - except ValueError as e: - print("Failing due to %r.\nYou need to update mlprodict." % e) - import sys - sys.exit(0) + onx4 = to_onnx(model_onx, Xi_train[:1].astype(numpy.float32), + target_opset=17) sess4 = InferenceSession(onx4.SerializeToString()) @@ -469,210 +457,35 @@ The conversion. - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - (3.0013221589797467e-05, 5.45871186188065e-08) - - - - -.. GENERATED FROM PYTHON SOURCE LINES 295-296 - -It works too in a more simple way. - -.. GENERATED FROM PYTHON SOURCE LINES 298-318 - -No discrepencies at all? -++++++++++++++++++++++++ - -Is it possible to get no error at all? -There is one major obstacle: :epkg:`scikit-learn` -stores the predicted values in every leave with double -(`_tree.pyx - _get_value_ndarray -`_), :epkg:`ONNX` defines the -the predicted values as floats: :epkg:`TreeEnsembleRegressor`. -What can we do to solve it? -What if we could extend ONNX specifications to support -double instead of floats. -We reuse what was developped in example -`Other way to convert `_ -and a custom ONNX node `TreeEnsembleRegressorDouble -`_. - -.. GENERATED FROM PYTHON SOURCE LINES 318-329 - -.. code-block:: default - - - - tree = DecisionTreeRegressor(max_depth=max_depth) - tree.fit(Xi_train, yi_train) - - model_onx = to_onnx_extended(tree, Xi_train[:1].astype(numpy.float64), - rewrite_ops=True) - - oinf5 = OnnxInference(model_onx, runtime='python_compiled') - print(oinf5) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - OnnxInference(...) - def compiled_run(dict_inputs, yield_ops=None, context=None): - if yield_ops is not None: - raise NotImplementedError('yields_ops should be None.') - # inputs - X = dict_inputs['X'] - (variable, ) = n0_treeensembleregressordouble(X) - return { - 'variable': variable, - } - - - - -.. GENERATED FROM PYTHON SOURCE LINES 330-331 - -Let's measure the discrepencies. - -.. GENERATED FROM PYTHON SOURCE LINES 331-336 - -.. code-block:: default - - - X64 = Xi_test.astype(numpy.float64) - skl5 = tree.predict(X64) - ort5 = oinf5.run({'X': X64})['variable'] - - - - - - - - -.. GENERATED FROM PYTHON SOURCE LINES 337-338 - -Perfect, no discrepencies at all. - -.. GENERATED FROM PYTHON SOURCE LINES 338-341 - -.. code-block:: default - - - print(diff(skl5, ort5)) - - - - - .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (0.0, 0.0) - - - - -.. GENERATED FROM PYTHON SOURCE LINES 342-350 - -CastRegressor -+++++++++++++ - -The previous example demonstrated the type difference for -the predicted values explains the small differences between -:epkg:`scikit-learn` and :epkg:`onnxruntime`. But it does not -with the current ONNX. Another option is to cast the -the predictions into floats in the :epkg:`scikit-learn` pipeline. - -.. GENERATED FROM PYTHON SOURCE LINES 350-364 - -.. 
code-block:: default - + (1.884853452338575e-05, 5.7036148848472986e-08) - ctree = CastRegressor(DecisionTreeRegressor(max_depth=max_depth)) - ctree.fit(Xi_train, yi_train) - onx6 = to_onnx(ctree, Xi_train[:1].astype(numpy.float32)) - - sess6 = InferenceSession(onx6.SerializeToString()) - - skl6 = ctree.predict(X32) - ort6 = sess6.run(None, {'X': X32})[0] - - print(diff(skl6, ort6)) - - - - - -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - (0.0, 0.0) - - - - -.. GENERATED FROM PYTHON SOURCE LINES 365-366 - -Success! .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.825 seconds) + **Total running time of the script:** ( 0 minutes 0.838 seconds) .. _sphx_glr_download_auto_tutorial_plot_ebegin_float_double.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_ebegin_float_double.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_ebegin_float_double.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_ebegin_float_double.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_ebegin_float_double.ipynb ` + :download:`Download Jupyter notebook: plot_ebegin_float_double.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_fbegin_investigate.rst.txt b/_sources/auto_tutorial/plot_fbegin_investigate.rst.txt index b6f764f10..64ae3cd00 100644 --- a/_sources/auto_tutorial/plot_fbegin_investigate.rst.txt +++ b/_sources/auto_tutorial/plot_fbegin_investigate.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_fbegin_investigate.py" +.. "auto_tutorial/plot_fbegin_investigate.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -34,9 +34,6 @@ due to a shape mismatch. Then it is useful the get the shape of every intermediate result. This example looks into two ways of doing it. -.. contents:: - :local: - Look into pipeline steps ++++++++++++++++++++++++ @@ -47,7 +44,7 @@ through every step of the pipeline. If the pipeline has *n* steps, it converts the pipeline with step 1, then the pipeline with steps 1, 2, then 1, 2, 3... -.. GENERATED FROM PYTHON SOURCE LINES 33-45 +.. GENERATED FROM PYTHON SOURCE LINES 30-42 .. code-block:: default @@ -70,11 +67,11 @@ then the pipeline with steps 1, 2, then 1, 2, 3... -.. GENERATED FROM PYTHON SOURCE LINES 46-47 +.. GENERATED FROM PYTHON SOURCE LINES 43-44 The pipeline. -.. GENERATED FROM PYTHON SOURCE LINES 47-57 +.. GENERATED FROM PYTHON SOURCE LINES 44-54 .. code-block:: default @@ -84,7 +81,7 @@ The pipeline. pipe = Pipeline(steps=[ ('std', StandardScaler()), - ('km', KMeans(3)) + ('km', KMeans(3, n_init=3)) ]) pipe.fit(X) @@ -96,24 +93,27 @@ The pipeline. .. raw:: html
-
Pipeline(steps=[('std', StandardScaler()), ('km', KMeans(n_clusters=3))])
+
Pipeline(steps=[('std', StandardScaler()),
+                    ('km', KMeans(n_clusters=3, n_init=3))])


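The large array printed below is simply the output of the pipeline's
*transform* method, i.e. the distance of every sample to each of the three
cluster centres. A quick check, not part of the generated example:

.. code-block:: default

    dist = pipe.transform(X)
    print(dist.shape)   # (n_samples, 3): one column per cluster centre
    print(dist[:2])
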
-.. GENERATED FROM PYTHON SOURCE LINES 58-61 +.. GENERATED FROM PYTHON SOURCE LINES 55-58 The function goes through every step, overloads the methods *transform* and returns an ONNX graph for every step. -.. GENERATED FROM PYTHON SOURCE LINES 61-65 +.. GENERATED FROM PYTHON SOURCE LINES 58-63 .. code-block:: default steps = collect_intermediate_steps( pipe, "pipeline", - [("X", FloatTensorType([None, X.shape[1]]))]) + [("X", FloatTensorType([None, X.shape[1]]))], + target_opset=17) @@ -122,12 +122,12 @@ returns an ONNX graph for every step. -.. GENERATED FROM PYTHON SOURCE LINES 66-68 +.. GENERATED FROM PYTHON SOURCE LINES 64-66 We call method transform to population the cache the overloaded methods *transform* keeps. -.. GENERATED FROM PYTHON SOURCE LINES 68-70 +.. GENERATED FROM PYTHON SOURCE LINES 66-68 .. code-block:: default @@ -139,170 +139,168 @@ cache the overloaded methods *transform* keeps. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - array([[3.98940603, 0.21295824, 3.12119834], - [4.01793312, 0.99604549, 2.6755083 ], - [4.19343668, 0.65198444, 2.97416665], - [4.19784749, 0.9034561 , 2.88014429], - [4.11157152, 0.40215457, 3.30022609], - [3.89893116, 1.21154793, 3.50554424], - [4.21638048, 0.50244932, 3.14856384], - [3.97313411, 0.09132468, 2.99184826], - [4.40757189, 1.42174651, 2.92515933], - [4.05764261, 0.78993078, 2.79398956], - [3.92088109, 0.78999385, 3.32125333], - [4.07853631, 0.27618123, 3.0493632 ], - [4.16440431, 1.03497888, 2.80635045], - [4.63069748, 1.33482453, 3.21220972], - [4.14619343, 1.63865558, 3.88834965], - [4.49547518, 2.39898792, 4.4998303 ], - [4.02966144, 1.20748818, 3.60978017], - [3.91388548, 0.21618828, 3.05594182], - [3.72562039, 1.20986655, 3.34493953], - [4.10101938, 0.86706182, 3.50065397], - [3.66383713, 0.50401564, 2.80825681], - [3.94496718, 0.66826437, 3.27800809], - [4.51061335, 0.68658071, 3.58990876], - [3.57996434, 0.47945627, 2.55934697], - [3.98817445, 0.36345425, 2.96493153], - [3.88431906, 0.99023912, 2.55682739], - [3.79088782, 0.22683089, 2.8279719 ], - [3.89539875, 0.2947186 , 3.05970831], - [3.88085622, 0.25361098, 2.95425291], - [4.09851673, 0.65019824, 2.87745051], - [4.01796142, 0.80138328, 2.73238773], - [3.57350896, 0.52309257, 2.73361981], - [4.5037664 , 1.57658655, 4.11853014], - [4.4465301 , 1.87652483, 4.22845606], - [3.97906378, 0.76858489, 2.71452112], - [4.01986385, 0.54896332, 2.86508665], - [3.80064093, 0.63079314, 3.0573692 ], - [4.25136846, 0.45982568, 3.40284985], - [4.42052558, 1.2336976 , 3.00742655], - [3.90865188, 0.14580827, 2.95472117], - [4.01192633, 0.20261743, 3.12324651], - [4.64398605, 2.67055552, 2.90164193], - [4.42154566, 0.90927099, 3.15411688], - [3.70483773, 0.50081008, 2.8613548 ], - [3.9078554 , 0.92159916, 3.34606471], - [4.01421067, 1.01946042, 2.65231058], - [4.14238152, 0.86953764, 3.53206587], - [4.23577398, 0.72275914, 2.99813103], - [3.97409784, 0.72324305, 3.34116935], - [3.97223984, 0.30295342, 2.90222887], - [0.95288059, 3.43619989, 1.9003878 ], - [0.99352148, 2.97232682, 1.41851492], - [0.72661726, 3.51850037, 1.68457079], - [2.69898424, 3.33264308, 0.96940962], - [1.11074501, 3.35747592, 0.9112523 ], - [1.8143491 , 2.77550662, 0.35721918], - [1.00650285, 3.01808184, 1.59351202], - [3.31296552, 2.77360088, 1.50213315], - [1.14114175, 3.21148368, 1.11632078], - [2.42994048, 2.66294828, 0.77921299], - [3.73666782, 3.62389817, 1.97194958], - [1.45918639, 2.70011145, 0.77530513], - [2.74268279, 3.53658932, 1.25941769], - [1.28976474, 2.98813829, 0.66155141], - 
[2.05251547, 2.32311723, 0.73833453], - [0.98780965, 3.14311522, 1.46572707], - [1.67700171, 2.68234835, 0.80185102], - [2.12682734, 2.63954211, 0.568386 ], - [2.33743839, 3.97369206, 1.19987895], - [2.46667974, 2.87494798, 0.67881532], - [1.1880022 , 3.03853641, 1.34222961], - [1.63233668, 2.8022861 , 0.53061062], - [1.65142259, 3.68305664, 0.79234309], - [1.54593744, 2.96833851, 0.57371215], - [1.2933375 , 2.9760862 , 0.90589785], - [1.03085926, 3.13002382, 1.22490527], - [1.09304603, 3.56679427, 1.26783271], - [0.52050254, 3.5903606 , 1.42114042], - [1.34712856, 2.93839428, 0.58974672], - [2.44164622, 2.58203512, 0.76432091], - [2.69027665, 2.99796537, 0.89738242], - [2.76965187, 2.92597852, 0.98549851], - [2.02829879, 2.68907313, 0.3921368 ], - [1.4211892 , 3.42215998, 0.54223583], - [1.88799766, 2.62771445, 0.90567816], - [1.39853465, 2.75915071, 1.70872911], - [0.78009974, 3.30075052, 1.48190142], - [2.2083069 , 3.73017167, 1.06129323], - [1.87666989, 2.37943811, 0.81863359], - [2.41035271, 2.98789866, 0.599882 ], - [2.26782134, 2.89079656, 0.4914813 ], - [1.25085451, 2.86642713, 0.84409423], - [2.11791607, 2.86642575, 0.38941349], - [3.35089399, 2.96966239, 1.53271026], - [2.05312152, 2.77003779, 0.30831638], - [1.83091351, 2.38255534, 0.81726253], - [1.80454586, 2.55559903, 0.56428027], - [1.39825227, 2.8455521 , 0.72672271], - [3.06324547, 2.56987887, 1.28805849], - [1.89861511, 2.64007308, 0.38163798], - [1.0584579 , 4.24274589, 2.31271244], - [1.5185265 , 3.57067982, 0.76585766], - [0.52472 , 4.44150237, 2.14762671], - [0.77236486, 3.69480186, 1.17645413], - [0.53031563, 4.11613683, 1.73594932], - [1.2022172 , 5.03326801, 2.78128346], - [2.74462238, 3.3503222 , 1.22550604], - [0.92275933, 4.577021 , 2.2426558 ], - [1.40314162, 4.363498 , 1.50462864], - [1.48323372, 4.79334275, 3.22975724], - [0.4787491 , 3.62749566, 1.71837714], - [1.0325986 , 3.89360823, 1.10409694], - [0.27818948, 4.1132966 , 1.80475907], - [1.91870424, 3.82688169, 0.94858807], - [1.49910975, 3.91538879, 1.39433359], - [0.68622715, 3.89835633, 1.90677079], - [0.46463058, 3.70128288, 1.39713702], - [2.10127163, 5.18341242, 3.85224062], - [1.83092395, 5.58136629, 2.95786451], - [2.37017622, 4.02615768, 1.17790381], - [0.52540209, 4.31907679, 2.27442972], - [1.62249456, 3.4288432 , 0.91211061], - [1.47042293, 5.19031307, 2.77937737], - [1.15814207, 3.64273089, 0.84735471], - [0.520093 , 4.00723617, 2.15695444], - [0.66660166, 4.2637671 , 2.33581345], - [1.08324891, 3.45930032, 0.79774043], - [0.94925151, 3.27575645, 1.022307 ], - [0.84098317, 4.05342943, 1.3842265 ], - [0.75748198, 4.1585729 , 2.03854964], - [1.07124861, 4.71100584, 2.28297732], - [2.17345728, 5.12224641, 3.88774921], - [0.87682321, 4.13401784, 1.47357101], - [1.11534598, 3.39830644, 0.7964005 ], - [1.59782917, 3.63719075, 0.80521086], - [1.25982873, 5.08776655, 2.8607372 ], - [1.07214028, 4.00416552, 2.3101089 ], - [0.51434392, 3.58815834, 1.46990247], - [1.0762733 , 3.19454679, 0.97017134], - [0.23050145, 4.09907253, 1.97333575], - [0.57373487, 4.28416057, 2.07939567], - [0.51130902, 4.17402084, 2.06609741], - [1.5185265 , 3.57067982, 0.76585766], - [0.54141867, 4.32128686, 2.24723796], - [0.85128501, 4.3480018 , 2.42521977], - [0.52475835, 4.1240495 , 1.82594618], - [1.52100812, 3.97564407, 1.03093862], - [0.44371189, 3.7539635 , 1.44892686], - [1.08437101, 3.7969924 , 2.17585453], - [1.13739231, 3.25638099, 1.00508668]]) - - - -.. 
GENERATED FROM PYTHON SOURCE LINES 71-73 + array([[3.97220157, 0.21295824, 3.12241924], + [3.99787307, 0.99604549, 2.67093263], + [4.17484361, 0.65198444, 2.97003927], + [4.1789179 , 0.9034561 , 2.87439521], + [4.09527298, 0.40215457, 3.30139711], + [3.88570212, 1.21154793, 3.51109541], + [4.19951064, 0.50244932, 3.14534167], + [3.95532767, 0.09132468, 2.99164562], + [4.38817034, 1.42174651, 2.91632391], + [4.03786574, 0.78993078, 2.79062164], + [3.90506362, 0.78999385, 3.3258656 ], + [4.06107053, 0.27618123, 3.04773291], + [4.14433414, 1.03497888, 2.80131879], + [4.61165831, 1.33482453, 3.20416659], + [4.13289816, 1.63865558, 3.89627182], + [4.48651213, 2.39898792, 4.50815311], + [4.01625262, 1.20748818, 3.61501306], + [3.89697041, 0.21618828, 3.05706539], + [3.71098466, 1.20986655, 3.35219798], + [4.086616 , 0.86706182, 3.50365186], + [3.64546316, 0.50401564, 2.81135074], + [3.93005961, 0.66826437, 3.28039115], + [4.49472411, 0.68658071, 3.58831498], + [3.56212928, 0.47945627, 2.55875855], + [3.97077425, 0.36345425, 2.96339957], + [3.86402575, 0.99023912, 2.5530338 ], + [3.77367018, 0.22683089, 2.82754129], + [3.87807792, 0.2947186 , 3.06173653], + [3.86275216, 0.25361098, 2.95552455], + [4.07994174, 0.65019824, 2.87333352], + [3.99861021, 0.80138328, 2.72803208], + [3.55568868, 0.52309257, 2.73641792], + [4.49114086, 1.57658655, 4.12348735], + [4.43501224, 1.87652483, 4.23530312], + [3.95950739, 0.76858489, 2.71091534], + [4.00072575, 0.54896332, 2.86311402], + [3.78300091, 0.63079314, 3.0615144 ], + [4.23490423, 0.45982568, 3.40343436], + [4.40153286, 1.2336976 , 2.99955514], + [3.89069251, 0.14580827, 2.95527858], + [3.9951345 , 0.20261743, 3.12357878], + [4.62328438, 2.67055552, 2.88854512], + [4.40353892, 0.90927099, 3.14807862], + [3.68912032, 0.50081008, 2.86147193], + [3.8939904 , 0.92159916, 3.34925913], + [3.99460105, 1.01946042, 2.64670109], + [4.12768297, 0.86953764, 3.53518427], + [4.21738173, 0.72275914, 2.99333175], + [3.95835329, 0.72324305, 3.34508059], + [3.95372786, 0.30295342, 2.90117552], + [0.93688791, 3.43619989, 1.92126695], + [0.97602613, 2.97232682, 1.43652719], + [0.70684331, 3.51850037, 1.70536445], + [2.68055347, 3.33264308, 0.9535028 ], + [1.0882743 , 3.35747592, 0.93174089], + [1.79517817, 2.77550662, 0.36129564], + [0.99454069, 3.01808184, 1.60954394], + [3.2941058 , 2.77360088, 1.4844702 ], + [1.11736796, 3.21148368, 1.13778849], + [2.41310146, 2.66294828, 0.76189431], + [3.71811095, 3.62389817, 1.95510958], + [1.44115032, 2.70011145, 0.78733906], + [2.72209616, 3.53658932, 1.25528011], + [1.26899484, 2.98813829, 0.68046933], + [2.03366801, 2.32311723, 0.7386478 ], + [0.96655433, 3.14311522, 1.48632293], + [1.66143984, 2.68234835, 0.80567208], + [2.1051166 , 2.63954211, 0.57184189], + [2.32028627, 3.97369206, 1.19832221], + [2.44625761, 2.87494798, 0.66720431], + [1.17881254, 3.03853641, 1.35243001], + [1.61047248, 2.8022861 , 0.54870182], + [1.63287534, 3.68305664, 0.80222733], + [1.52405623, 2.96833851, 0.59281379], + [1.27009395, 2.9760862 , 0.92691417], + [1.00782039, 3.13002382, 1.24582165], + [1.07063185, 3.56679427, 1.28841255], + [0.49696775, 3.5903606 , 1.44058261], + [1.32774484, 2.93839428, 0.60609916], + [2.42011876, 2.58203512, 0.75979573], + [2.67031866, 2.99796537, 0.88332283], + [2.74916442, 2.92597852, 0.97295414], + [2.00733004, 2.68907313, 0.39450078], + [1.40409915, 3.42215998, 0.55254609], + [1.87359817, 2.62771445, 0.90409172], + [1.38942931, 2.75915071, 1.72093827], + [0.75864765, 3.30075052, 1.50231003], + [2.18888648, 3.73017167, 
1.06440319], + [1.85885082, 2.37943811, 0.82259848], + [2.39150476, 2.98789866, 0.58180763], + [2.24875062, 2.89079656, 0.47613928], + [1.23068597, 2.86642713, 0.86151719], + [2.0971052 , 2.86642575, 0.38623038], + [3.33176642, 2.96966239, 1.51536817], + [2.03396275, 2.77003779, 0.29666246], + [1.81194891, 2.38255534, 0.82460449], + [1.78545021, 2.55559903, 0.57017344], + [1.37597729, 2.8455521 , 0.74674716], + [3.04410524, 2.56987887, 1.27214703], + [1.8789993 , 2.64007308, 0.3839694 ], + [1.07518249, 4.24274589, 2.3231268 ], + [1.50664789, 3.57067982, 0.76586458], + [0.53503117, 4.44150237, 2.16460668], + [0.75898803, 3.69480186, 1.19079906], + [0.53772877, 4.11613683, 1.74890266], + [1.21258624, 5.03326801, 2.79880246], + [2.73154786, 3.3503222 , 1.20493915], + [0.92230348, 4.577021 , 2.26064879], + [1.39208869, 4.363498 , 1.51591765], + [1.50326811, 4.79334275, 3.24565892], + [0.4769421 , 3.62749566, 1.73398281], + [1.01951679, 3.89360823, 1.11653632], + [0.28300998, 4.1132966 , 1.8209098 ], + [1.90771307, 3.82688169, 0.94077286], + [1.49665868, 3.91538879, 1.39463942], + [0.69648261, 3.89835633, 1.91923245], + [0.44800197, 3.70128288, 1.41396325], + [2.11710726, 5.18341242, 3.87020286], + [1.8369882 , 5.58136629, 2.97188789], + [2.35409464, 4.02615768, 1.17309015], + [0.54832687, 4.31907679, 2.29006262], + [1.61278191, 3.4288432 , 0.90897245], + [1.47513288, 5.19031307, 2.79625003], + [1.14174615, 3.64273089, 0.86066075], + [0.53620233, 4.00723617, 2.1727675 ], + [0.67208548, 4.2637671 , 2.35489518], + [1.06663455, 3.45930032, 0.81175026], + [0.93494533, 3.27575645, 1.03569718], + [0.83553248, 4.05342943, 1.39558948], + [0.74854302, 4.1585729 , 2.05837295], + [1.07047627, 4.71100584, 2.30034327], + [2.18621697, 5.12224641, 3.90678521], + [0.87437682, 4.13401784, 1.48399598], + [1.0947599 , 3.39830644, 0.81512611], + [1.58049956, 3.63719075, 0.81393069], + [1.27157117, 5.08776655, 2.87804494], + [1.08615557, 4.00416552, 2.32144511], + [0.50258278, 3.58815834, 1.4859412 ], + [1.0625801 , 3.19454679, 0.98183333], + [0.24502791, 4.09907253, 1.9903383 ], + [0.59235157, 4.28416057, 2.0929821 ], + [0.52380862, 4.17402084, 2.08177428], + [1.50664789, 3.57067982, 0.76586458], + [0.56488438, 4.32128686, 2.26218919], + [0.87242105, 4.3480018 , 2.43867573], + [0.53197842, 4.1240495 , 1.84004429], + [1.50771114, 3.97564407, 1.03723854], + [0.43328422, 3.7539635 , 1.46435048], + [1.09439379, 3.7969924 , 2.186898 ], + [1.12558627, 3.25638099, 1.0145515 ]]) + + + +.. GENERATED FROM PYTHON SOURCE LINES 69-71 We compute every step and compare ONNX and scikit-learn outputs. -.. GENERATED FROM PYTHON SOURCE LINES 73-93 +.. GENERATED FROM PYTHON SOURCE LINES 71-92 .. code-block:: default @@ -311,7 +309,8 @@ ONNX and scikit-learn outputs. print('----------------------------') print(step['model']) onnx_step = step['onnx_step'] - sess = InferenceSession(onnx_step.SerializeToString()) + sess = InferenceSession(onnx_step.SerializeToString(), + providers=["CPUExecutionProvider"]) onnx_outputs = sess.run(None, {'X': X.astype(numpy.float32)}) onnx_output = onnx_outputs[-1] skl_outputs = step['model']._debug.outputs['transform'] @@ -332,21 +331,19 @@ ONNX and scikit-learn outputs. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none ---------------------------- StandardScaler() difference 4.799262827148709e-07 ---------------------------- - KMeans(n_clusters=3) + KMeans(n_clusters=3, n_init=3) difference 1.095537650763756e-06 -.. GENERATED FROM PYTHON SOURCE LINES 94-101 +.. 
GENERATED FROM PYTHON SOURCE LINES 93-100 Python runtime to look into every node ++++++++++++++++++++++++++++++++++++++ @@ -356,13 +353,14 @@ into every node of the ONNX graph. This option can be used to check when the computation fails due to nan values or a dimension mismatch. -.. GENERATED FROM PYTHON SOURCE LINES 101-109 +.. GENERATED FROM PYTHON SOURCE LINES 100-109 .. code-block:: default - onx = to_onnx(pipe, X[:1].astype(numpy.float32)) + onx = to_onnx(pipe, X[:1].astype(numpy.float32), + target_opset=17) oinf = OnnxInference(onx) oinf.run({'X': X[:2].astype(numpy.float32)}, @@ -374,12 +372,10 @@ fails due to nan values or a dimension mismatch. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - +ki='Ad_Addcst': (3,) (dtype=float32 min=0.9830552339553833 max=5.035177230834961) - +ki='Ge_Gemmcst': (3, 4) (dtype=float32 min=-1.3049873113632202 max=1.1359702348709106) + +ki='Ad_Addcst': (3,) (dtype=float32 min=1.0029560327529907 max=5.035177230834961) + +ki='Ge_Gemmcst': (3, 4) (dtype=float32 min=-1.3049873113632202 max=1.131404995918274) +ki='Mu_Mulcst': (1,) (dtype=float32 min=0.0 max=0.0) -- OnnxInference: run 8 nodes with 1 inputs Onnx-Scaler(X) -> variable (name='Scaler') @@ -389,18 +385,18 @@ fails due to nan values or a dimension mismatch. Onnx-Mul(Re_reduced0, Mu_Mulcst) -> Mu_C0 (name='Mu_Mul') +kr='Mu_C0': (2, 1) (dtype=float32 min=0.0 max=0.0) Onnx-Gemm(variable, Ge_Gemmcst, Mu_C0) -> Ge_Y0 (name='Ge_Gemm') - +kr='Ge_Y0': (2, 3) (dtype=float32 min=-10.366023063659668 max=7.967348575592041) + +kr='Ge_Y0': (2, 3) (dtype=float32 min=-10.366023063659668 max=7.877023220062256) Onnx-Add(Re_reduced0, Ge_Y0) -> Ad_C01 (name='Ad_Add') - +kr='Ad_C01': (2, 3) (dtype=float32 min=-4.98982572555542 max=12.817853927612305) + +kr='Ad_C01': (2, 3) (dtype=float32 min=-4.98982572555542 max=12.727529525756836) Onnx-Add(Ad_Addcst, Ad_C01) -> Ad_C0 (name='Ad_Add1') - +kr='Ad_C0': (2, 3) (dtype=float32 min=0.045351505279541016 max=16.143783569335938) - Onnx-Sqrt(Ad_C0) -> scores (name='Sq_Sqrt') - +kr='scores': (2, 3) (dtype=float32 min=0.2129589319229126 max=4.017932891845703) + +kr='Ad_C0': (2, 3) (dtype=float32 min=0.045351505279541016 max=15.982987403869629) Onnx-ArgMin(Ad_C0) -> label (name='Ar_ArgMin') +kr='label': (2,) (dtype=int64 min=1 max=1) + Onnx-Sqrt(Ad_C0) -> scores (name='Sq_Sqrt') + +kr='scores': (2, 3) (dtype=float32 min=0.2129589319229126 max=3.997872829437256) - {'label': array([1, 1], dtype=int64), 'scores': array([[3.9894059 , 0.21295893, 3.1211984 ], - [4.017933 , 0.99604493, 2.675508 ]], dtype=float32)} + {'label': array([1, 1]), 'scores': array([[3.9722016 , 0.21295893, 3.1224194 ], + [3.9978728 , 0.99604493, 2.6709325 ]], dtype=float32)} @@ -425,16 +421,14 @@ And to get a sense of the intermediate results. .. rst-class:: sphx-glr-script-out - Out: - .. 
code-block:: none - +ki='Ad_Addcst': (3,) (dtype=float32 min=0.9830552339553833 max=5.035177230834961 - [3.32593 5.035177 0.98305523] - +ki='Ge_Gemmcst': (3, 4) (dtype=float32 min=-1.3049873113632202 max=1.1359702348709106 - [[ 1.1359702 0.08842168 0.9961545 1.0175261 ] + +ki='Ad_Addcst': (3,) (dtype=float32 min=1.0029560327529907 max=5.035177230834961 + [3.255458 5.035177 1.002956] + +ki='Ge_Gemmcst': (3, 4) (dtype=float32 min=-1.3049873113632202 max=1.131404995918274 + [[ 1.131405 0.07903422 0.98537153 0.9990883 ] [-1.0145789 0.85326266 -1.3049873 -1.2548935 ] - [-0.05021989 -0.8833765 0.34773782 0.2815273 ]] + [-0.06881714 -0.89339954 0.3452218 0.284393 ]] +ki='Mu_Mulcst': (1,) (dtype=float32 min=0.0 max=0.0 [0.] -kv='X' shape=(2, 4) dtype=float32 min=0.20000000298023224 max=5.099999904632568 @@ -452,29 +446,29 @@ And to get a sense of the intermediate results. [[0.] [0.]] Onnx-Gemm(variable, Ge_Gemmcst, Mu_C0) -> Ge_Y0 (name='Ge_Gemm') - +kr='Ge_Y0': (2, 3) (dtype=float32 min=-10.366023063659668 max=7.967348575592041) - [[ 7.2132325 -10.366023 3.3826268] - [ 7.9673486 -8.893578 1.3247827]] + +kr='Ge_Y0': (2, 3) (dtype=float32 min=-10.366023063659668 max=7.877023220062256) + [[ 7.14673 -10.366023 3.370349 ] + [ 7.877023 -8.893578 1.2804183]] Onnx-Add(Re_reduced0, Ge_Y0) -> Ad_C01 (name='Ad_Add') - +kr='Ad_C01': (2, 3) (dtype=float32 min=-4.98982572555542 max=12.817853927612305) - [[12.58943 -4.9898257 8.758824 ] - [12.817854 -4.0430717 6.1752887]] + +kr='Ad_C01': (2, 3) (dtype=float32 min=-4.98982572555542 max=12.727529525756836) + [[12.522927 -4.9898257 8.746546 ] + [12.72753 -4.0430717 6.130924 ]] Onnx-Add(Ad_Addcst, Ad_C01) -> Ad_C0 (name='Ad_Add1') - +kr='Ad_C0': (2, 3) (dtype=float32 min=0.045351505279541016 max=16.143783569335938) - [[15.9153595 0.04535151 9.741879 ] - [16.143784 0.9921055 7.158344 ]] - Onnx-Sqrt(Ad_C0) -> scores (name='Sq_Sqrt') - +kr='scores': (2, 3) (dtype=float32 min=0.2129589319229126 max=4.017932891845703) - [[3.9894059 0.21295893 3.1211984 ] - [4.017933 0.99604493 2.675508 ]] + +kr='Ad_C0': (2, 3) (dtype=float32 min=0.045351505279541016 max=15.982987403869629) + [[15.778385 0.04535151 9.749502 ] + [15.982987 0.9921055 7.13388 ]] Onnx-ArgMin(Ad_C0) -> label (name='Ar_ArgMin') +kr='label': (2,) (dtype=int64 min=1 max=1) [1 1] + Onnx-Sqrt(Ad_C0) -> scores (name='Sq_Sqrt') + +kr='scores': (2, 3) (dtype=float32 min=0.2129589319229126 max=3.997872829437256) + [[3.9722016 0.21295893 3.1224194 ] + [3.9978728 0.99604493 2.6709325 ]] [VALIDATE] type [VALIDATE] mis={} - {'label': array([1, 1], dtype=int64), 'scores': array([[3.9894059 , 0.21295893, 3.1211984 ], - [4.017933 , 0.99604493, 2.675508 ]], dtype=float32)} + {'label': array([1, 1]), 'scores': array([[3.9722016 , 0.21295893, 3.1224194 ], + [3.9978728 , 0.99604493, 2.6709325 ]], dtype=float32)} @@ -506,35 +500,23 @@ Final graph .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.573 seconds) + **Total running time of the script:** ( 0 minutes 0.505 seconds) .. _sphx_glr_download_auto_tutorial_plot_fbegin_investigate.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_fbegin_investigate.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. 
container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_fbegin_investigate.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_fbegin_investigate.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_fbegin_investigate.ipynb ` + :download:`Download Jupyter notebook: plot_fbegin_investigate.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_gbegin_cst.rst.txt b/_sources/auto_tutorial/plot_gbegin_cst.rst.txt index 91412bddc..9016d9c05 100644 --- a/_sources/auto_tutorial/plot_gbegin_cst.rst.txt +++ b/_sources/auto_tutorial/plot_gbegin_cst.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_gbegin_cst.py" +.. "auto_tutorial/plot_gbegin_cst.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -28,16 +28,13 @@ or other informations like a vocabulary. Last sections shows how to remove an output or to promote an intermediate result to an output. -.. contents:: - :local: - Train and convert a model +++++++++++++++++++++++++ We download one model from the :epkg:`ONNX Zoo` but the model could be trained and produced by another converter library. -.. GENERATED FROM PYTHON SOURCE LINES 23-45 +.. GENERATED FROM PYTHON SOURCE LINES 20-42 .. code-block:: default @@ -69,23 +66,21 @@ could be trained and produced by another converter library. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - D:\Program Files\Python\Python39\lib\site-packages\sklearn\linear_model\_sag.py:350: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge + /home/xadupre/github/scikit-learn/sklearn/linear_model/_sag.py:350: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge warnings.warn( -.. GENERATED FROM PYTHON SOURCE LINES 46-49 +.. GENERATED FROM PYTHON SOURCE LINES 43-46 Add training parameter ++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 49-55 +.. GENERATED FROM PYTHON SOURCE LINES 46-52 .. code-block:: default @@ -102,12 +97,12 @@ Add training parameter -.. GENERATED FROM PYTHON SOURCE LINES 56-58 +.. GENERATED FROM PYTHON SOURCE LINES 53-55 Inference +++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 58-66 +.. GENERATED FROM PYTHON SOURCE LINES 55-63 .. code-block:: default @@ -125,22 +120,20 @@ Inference .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none output names: ['label', 'probabilities', 'C', 'l1_ratio'] outputs - [array([2, 2], dtype=int64), - array([[4.6285493e-03, 4.9396357e-01, 5.0140786e-01], - [2.8713079e-05, 2.1343414e-02, 9.7862786e-01]], dtype=float32), + [array([2, 0], dtype=int64), + array([[7.9804100e-05, 9.3561210e-02, 9.0635902e-01], + [9.8617423e-01, 1.3825771e-02, 8.3083460e-09]], dtype=float32), array([2.]), array([0.5])] -.. GENERATED FROM PYTHON SOURCE LINES 67-77 +.. GENERATED FROM PYTHON SOURCE LINES 64-74 The major draw back of this solution is increase the prediction time as onnxruntime copies the constants for every prediction. 
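The extra outputs *C* and *l1_ratio* seen in the previous section are plain
constants stored inside the graph; the helper the example uses to add them is
not visible in this diff. The sketch below shows, with the standard *onnx*
API only, what adding such a constant output amounts to; the variable name
*onx* for the converted model is an assumption.

.. code-block:: default

    import numpy
    from onnx import TensorProto, helper, numpy_helper

    # store the training parameter as an initializer of the graph...
    cst = numpy_helper.from_array(numpy.array([2.0]), name='C_value')
    onx.graph.initializer.append(cst)

    # ...expose it through an Identity node and declare it as a new output
    onx.graph.node.append(helper.make_node('Identity', ['C_value'], ['C']))
    onx.graph.output.append(
        helper.make_tensor_value_info('C', TensorProto.DOUBLE, [1]))

Every call to *InferenceSession.run* then returns that constant along with
the predictions, which is the copy cost mentioned above.
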
@@ -153,7 +146,7 @@ Select outputs Next function removes unneeded outputs from a model, not only the constants. Next model only keeps the probabilities. -.. GENERATED FROM PYTHON SOURCE LINES 77-89 +.. GENERATED FROM PYTHON SOURCE LINES 74-86 .. code-block:: default @@ -175,19 +168,17 @@ not only the constants. Next model only keeps the probabilities. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none output names: ['probabilities'] outputs - [array([[4.6285493e-03, 4.9396357e-01, 5.0140786e-01], - [2.8713079e-05, 2.1343414e-02, 9.7862786e-01]], dtype=float32)] + [array([[7.9804100e-05, 9.3561210e-02, 9.0635902e-01], + [9.8617423e-01, 1.3825771e-02, 8.3083460e-09]], dtype=float32)] -.. GENERATED FROM PYTHON SOURCE LINES 90-95 +.. GENERATED FROM PYTHON SOURCE LINES 87-92 This example only uses ONNX graph in memory and never saves or loads a model. This can be done by using the following snippets of code. @@ -195,7 +186,7 @@ model. This can be done by using the following snippets of code. Save a model ++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 95-99 +.. GENERATED FROM PYTHON SOURCE LINES 92-96 .. code-block:: default @@ -210,12 +201,12 @@ Save a model -.. GENERATED FROM PYTHON SOURCE LINES 100-102 +.. GENERATED FROM PYTHON SOURCE LINES 97-99 Load a model ++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 102-111 +.. GENERATED FROM PYTHON SOURCE LINES 99-108 .. code-block:: default @@ -234,14 +225,12 @@ Load a model .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none output names: ['probabilities'] outputs - [array([[4.6285493e-03, 4.9396357e-01, 5.0140786e-01], - [2.8713079e-05, 2.1343414e-02, 9.7862786e-01]], dtype=float32)] + [array([[7.9804100e-05, 9.3561210e-02, 9.0635902e-01], + [9.8617423e-01, 1.3825771e-02, 8.3083460e-09]], dtype=float32)] @@ -249,35 +238,23 @@ Load a model .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.081 seconds) + **Total running time of the script:** ( 0 minutes 0.040 seconds) .. _sphx_glr_download_auto_tutorial_plot_gbegin_cst.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_gbegin_cst.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_gbegin_cst.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_gbegin_cst.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_gbegin_cst.ipynb ` + :download:`Download Jupyter notebook: plot_gbegin_cst.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_gbegin_dataframe.rst.txt b/_sources/auto_tutorial/plot_gbegin_dataframe.rst.txt index 91727a08d..07d07266c 100644 --- a/_sources/auto_tutorial/plot_gbegin_dataframe.rst.txt +++ b/_sources/auto_tutorial/plot_gbegin_dataframe.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_gbegin_dataframe.py" +.. "auto_tutorial/plot_gbegin_dataframe.py" .. LINE NUMBERS ARE GIVEN BELOW. .. 
only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -28,13 +28,10 @@ if all the data share the same type. But data held in a dataframe have usually multiple types, float, integer or string for categories. ONNX also supports that case. -.. contents:: - :local: - A dataset with categories +++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 21-60 +.. GENERATED FROM PYTHON SOURCE LINES 18-57 .. code-block:: default @@ -85,35 +82,35 @@ A dataset with categories .. raw:: html
-
Pipeline(steps=[('preprocess',
+    
Pipeline(steps=[('preprocess',
                      ColumnTransformer(remainder='passthrough',
                                        transformers=[('cat',
                                                       Pipeline(steps=[('onehot',
                                                                        OneHotEncoder(handle_unknown='ignore',
                                                                                      sparse=False))]),
                                                       ['CAT1', 'CAT2'])])),
-                    ('rf', RandomForestClassifier())])
['CAT1', 'CAT2']
OneHotEncoder(handle_unknown='ignore', sparse=False)
['num1', 'num2']
passthrough
RandomForestClassifier()


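Before going through the conversion below, it helps to keep in mind what
*onnxruntime* will eventually expect for such a mixed-type dataset: not a
dataframe but a dictionary with one named 2D tensor per column, as printed
later in this example. A minimal sketch of that split, assuming the dataframe
is called *data* and using the column names shown above:

.. code-block:: default

    import numpy

    # one entry per column, each reshaped into a (n_rows, 1) tensor
    feeds = {c: data[c].values.reshape((-1, 1)) for c in data.columns}
    # numerical columns are cast to float32 to match the ONNX input types
    for c in ['num1', 'num2']:
        feeds[c] = feeds[c].astype(numpy.float32)
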
-.. GENERATED FROM PYTHON SOURCE LINES 61-62 +.. GENERATED FROM PYTHON SOURCE LINES 58-59 Display. -.. GENERATED FROM PYTHON SOURCE LINES 62-68 +.. GENERATED FROM PYTHON SOURCE LINES 59-65 .. code-block:: default @@ -135,14 +132,14 @@ Display. -.. GENERATED FROM PYTHON SOURCE LINES 69-73 +.. GENERATED FROM PYTHON SOURCE LINES 66-70 Conversion to ONNX ++++++++++++++++++ Function *to_onnx* does not handle dataframes. -.. GENERATED FROM PYTHON SOURCE LINES 73-80 +.. GENERATED FROM PYTHON SOURCE LINES 70-77 .. code-block:: default @@ -160,11 +157,11 @@ Function *to_onnx* does not handle dataframes. -.. GENERATED FROM PYTHON SOURCE LINES 81-82 +.. GENERATED FROM PYTHON SOURCE LINES 78-79 But it possible to use an extended one. -.. GENERATED FROM PYTHON SOURCE LINES 82-88 +.. GENERATED FROM PYTHON SOURCE LINES 79-85 .. code-block:: default @@ -181,12 +178,12 @@ But it possible to use an extended one. -.. GENERATED FROM PYTHON SOURCE LINES 89-91 +.. GENERATED FROM PYTHON SOURCE LINES 86-88 Graph +++++ -.. GENERATED FROM PYTHON SOURCE LINES 91-99 +.. GENERATED FROM PYTHON SOURCE LINES 88-96 .. code-block:: default @@ -210,14 +207,14 @@ Graph -.. GENERATED FROM PYTHON SOURCE LINES 100-104 +.. GENERATED FROM PYTHON SOURCE LINES 97-101 Prediction with ONNX ++++++++++++++++++++ *onnxruntime* does not support dataframes. -.. GENERATED FROM PYTHON SOURCE LINES 104-112 +.. GENERATED FROM PYTHON SOURCE LINES 101-109 .. code-block:: default @@ -235,14 +232,12 @@ Prediction with ONNX .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none run(): incompatible function arguments. The following argument types are supported: 1. (self: onnxruntime.capi.onnxruntime_pybind11_state.InferenceSession, arg0: List[str], arg1: Dict[str, object], arg2: onnxruntime.capi.onnxruntime_pybind11_state.RunOptions) -> List[object] - Invoked with: , ['label', 'probabilities'], CAT1 CAT2 num1 num2 + Invoked with: , ['label', 'probabilities'], CAT1 CAT2 num1 num2 0 a c 0.50 0.60 1 b d 0.40 0.80 2 a d 0.50 0.56 @@ -253,11 +248,11 @@ Prediction with ONNX -.. GENERATED FROM PYTHON SOURCE LINES 113-114 +.. GENERATED FROM PYTHON SOURCE LINES 110-111 Let's use a shortcut -.. GENERATED FROM PYTHON SOURCE LINES 114-120 +.. GENERATED FROM PYTHON SOURCE LINES 111-117 .. code-block:: default @@ -273,8 +268,6 @@ Let's use a shortcut .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [0 1 0 1 0 1] @@ -283,11 +276,11 @@ Let's use a shortcut -.. GENERATED FROM PYTHON SOURCE LINES 121-122 +.. GENERATED FROM PYTHON SOURCE LINES 118-119 And probilities. -.. GENERATED FROM PYTHON SOURCE LINES 122-126 +.. GENERATED FROM PYTHON SOURCE LINES 119-123 .. code-block:: default @@ -301,27 +294,25 @@ And probilities. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [[0.75 0.25] - [0.26 0.74] - [0.79 0.21] - [0.31 0.69] - [0.7 0.3 ] - [0.31 0.69]] - [[0.75 0.25 ] - [0.26 0.74 ] - [0.78999996 0.21000001] - [0.31 0.69 ] - [0.70000005 0.29999998] - [0.30999994 0.69000006]] + [[0.79 0.21] + [0.34 0.66] + [0.73 0.27] + [0.28 0.72] + [0.71 0.29] + [0.34 0.66]] + [[0.79 0.20999998] + [0.34000003 0.65999997] + [0.73 0.26999998] + [0.27999997 0.72 ] + [0.71000004 0.28999996] + [0.34000003 0.65999997]] -.. GENERATED FROM PYTHON SOURCE LINES 127-137 +.. GENERATED FROM PYTHON SOURCE LINES 124-134 It looks ok. Let's dig into the details to directly use *onnxruntime*. @@ -334,7 +325,7 @@ different types. That's what ONNX should see: a list of inputs, the input name is the column name, the input type is the column type. -.. 
GENERATED FROM PYTHON SOURCE LINES 137-143 +.. GENERATED FROM PYTHON SOURCE LINES 134-140 .. code-block:: default @@ -350,8 +341,6 @@ the input type is the column type. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [('CAT1', StringTensorType(shape=[None, 1])), @@ -362,11 +351,11 @@ the input type is the column type. -.. GENERATED FROM PYTHON SOURCE LINES 144-145 +.. GENERATED FROM PYTHON SOURCE LINES 141-142 Let's use float instead. -.. GENERATED FROM PYTHON SOURCE LINES 145-154 +.. GENERATED FROM PYTHON SOURCE LINES 142-151 .. code-block:: default @@ -385,8 +374,6 @@ Let's use float instead. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [('CAT1', StringTensorType(shape=[None, 1])), @@ -397,11 +384,11 @@ Let's use float instead. -.. GENERATED FROM PYTHON SOURCE LINES 155-156 +.. GENERATED FROM PYTHON SOURCE LINES 152-153 Let's convert with *skl2onnx* only. -.. GENERATED FROM PYTHON SOURCE LINES 156-161 +.. GENERATED FROM PYTHON SOURCE LINES 153-158 .. code-block:: default @@ -417,14 +404,14 @@ Let's convert with *skl2onnx* only. -.. GENERATED FROM PYTHON SOURCE LINES 162-166 +.. GENERATED FROM PYTHON SOURCE LINES 159-163 Let's run it with onnxruntime. We need to convert the dataframe into a dictionary where column names become keys, and column values become values. -.. GENERATED FROM PYTHON SOURCE LINES 166-171 +.. GENERATED FROM PYTHON SOURCE LINES 163-168 .. code-block:: default @@ -439,8 +426,6 @@ values. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none {'CAT1': array([['a'], @@ -471,11 +456,11 @@ values. -.. GENERATED FROM PYTHON SOURCE LINES 172-173 +.. GENERATED FROM PYTHON SOURCE LINES 169-170 Inference. -.. GENERATED FROM PYTHON SOURCE LINES 173-181 +.. GENERATED FROM PYTHON SOURCE LINES 170-178 .. code-block:: default @@ -493,8 +478,6 @@ Inference. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [0 1 0 1 0 1] @@ -503,11 +486,11 @@ Inference. -.. GENERATED FROM PYTHON SOURCE LINES 182-183 +.. GENERATED FROM PYTHON SOURCE LINES 179-180 And probilities. -.. GENERATED FROM PYTHON SOURCE LINES 183-186 +.. GENERATED FROM PYTHON SOURCE LINES 180-183 .. code-block:: default @@ -520,22 +503,20 @@ And probilities. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [[0.75 0.25] - [0.26 0.74] - [0.79 0.21] - [0.31 0.69] - [0.7 0.3 ] - [0.31 0.69]] - [[0.75 0.25000003] - [0.2600004 0.7399996 ] - [0.78999996 0.21000002] - [0.31000036 0.68999964] - [0.70000005 0.29999998] - [0.31000036 0.68999964]] + [[0.79 0.21] + [0.34 0.66] + [0.73 0.27] + [0.28 0.72] + [0.71 0.29] + [0.34 0.66]] + [[0.78999996 0.21000002] + [0.34000033 0.65999967] + [0.73 0.27 ] + [0.2800004 0.7199996 ] + [0.71000004 0.29 ] + [0.34000033 0.65999967]] @@ -543,35 +524,23 @@ And probilities. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 1.416 seconds) + **Total running time of the script:** ( 0 minutes 1.130 seconds) .. _sphx_glr_download_auto_tutorial_plot_gbegin_dataframe.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_gbegin_dataframe.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. 
container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_gbegin_dataframe.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_gbegin_dataframe.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_gbegin_dataframe.ipynb ` + :download:`Download Jupyter notebook: plot_gbegin_dataframe.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_gbegin_transfer_learning.rst.txt b/_sources/auto_tutorial/plot_gbegin_transfer_learning.rst.txt index 609dd910c..14dd82b09 100644 --- a/_sources/auto_tutorial/plot_gbegin_transfer_learning.rst.txt +++ b/_sources/auto_tutorial/plot_gbegin_transfer_learning.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_gbegin_transfer_learning.py" +.. "auto_tutorial/plot_gbegin_transfer_learning.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -36,16 +36,13 @@ requires work. ONNX reduces the number of platforms to support. Once the model is converted into ONNX, it can be inserted in any :epkg:`scikit-learn` pipeline. -.. contents:: - :local: - Retrieve and load a model +++++++++++++++++++++++++ We download one model from the :epkg:`ONNX Zoo` but the model could be trained and produced by another converter library. -.. GENERATED FROM PYTHON SOURCE LINES 31-75 +.. GENERATED FROM PYTHON SOURCE LINES 28-72 .. code-block:: default @@ -99,21 +96,18 @@ could be trained and produced by another converter library. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - download 'https://github.com/onnx/models/raw/main/vision/classification/squeezenet/model/squeezenet1.1-7.onnx' - downloaded 4956208 bytes. + 'squeezenet1.1-7.onnx' already downloaded -.. GENERATED FROM PYTHON SOURCE LINES 76-77 +.. GENERATED FROM PYTHON SOURCE LINES 73-74 Loading the ONNX file and use it on one image. -.. GENERATED FROM PYTHON SOURCE LINES 77-83 +.. GENERATED FROM PYTHON SOURCE LINES 74-80 .. code-block:: default @@ -129,8 +123,6 @@ Loading the ONNX file and use it on one image. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none NodeArg(name='data', type='tensor(float)', shape=[1, 3, 224, 224]) @@ -138,17 +130,17 @@ Loading the ONNX file and use it on one image. -.. GENERATED FROM PYTHON SOURCE LINES 84-86 +.. GENERATED FROM PYTHON SOURCE LINES 81-83 The model expects a series of images of size `[3, 224, 224]`. -.. GENERATED FROM PYTHON SOURCE LINES 88-90 +.. GENERATED FROM PYTHON SOURCE LINES 85-87 Classifying an image ++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 90-100 +.. GENERATED FROM PYTHON SOURCE LINES 87-97 .. code-block:: default @@ -168,21 +160,18 @@ Classifying an image .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - download 'https://upload.wikimedia.org/wikipedia/commons/d/d2/East_Coker_elm%2C_2.jpg' - downloaded 712230 bytes. + 'East_Coker_elm.jpg' already downloaded -.. GENERATED FROM PYTHON SOURCE LINES 101-102 +.. GENERATED FROM PYTHON SOURCE LINES 98-99 Image to numpy and predection. -.. GENERATED FROM PYTHON SOURCE LINES 102-117 +.. 
GENERATED FROM PYTHON SOURCE LINES 99-114 .. code-block:: default @@ -207,20 +196,18 @@ Image to numpy and predection. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [145.59464 55.06765 60.599792 46.293953 37.982464] + [145.59459 55.06765 60.599808 46.293957 37.982475] -.. GENERATED FROM PYTHON SOURCE LINES 118-119 +.. GENERATED FROM PYTHON SOURCE LINES 115-116 Interpretation -.. GENERATED FROM PYTHON SOURCE LINES 119-124 +.. GENERATED FROM PYTHON SOURCE LINES 116-121 .. code-block:: default @@ -235,16 +222,14 @@ Interpretation .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [(205.84174, 'Samoyed, Samoyede'), (212.03664, 'park bench'), (225.50691, 'lakeside, lakeshore'), (232.90251, 'fountain'), (258.10968, 'geyser')] + [(205.84172, 'Samoyed, Samoyede'), (212.0366, 'park bench'), (225.50687, 'lakeside, lakeshore'), (232.90251, 'fountain'), (258.10965, 'geyser')] -.. GENERATED FROM PYTHON SOURCE LINES 125-130 +.. GENERATED FROM PYTHON SOURCE LINES 122-127 Classifying more images +++++++++++++++++++++++ @@ -252,7 +237,7 @@ Classifying more images The initial image is rotated, the answer is changing. -.. GENERATED FROM PYTHON SOURCE LINES 130-156 +.. GENERATED FROM PYTHON SOURCE LINES 127-153 .. code-block:: default @@ -293,31 +278,31 @@ the answer is changing. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - angle=-12.0 - [(247.06146, 'obelisk'), (238.95372, 'car mirror'), (235.27646, 'flagpole, flagstaff'), (231.51707, 'window screen'), (230.90657, 'picket fence, paling')] - angle=-10.0 - [(254.24683, 'car mirror'), (251.51357, 'obelisk'), (235.10512, 'groom, bridegroom'), (234.5295, 'picket fence, paling'), (232.13913, 'church, church building')] - angle=-8.0 - [(235.56952, 'obelisk'), (226.59697, 'car mirror'), (226.46773, 'picket fence, paling'), (221.46794, 'groom, bridegroom'), (220.88506, 'fountain')] - angle=-6.0 - [(265.50806, 'geyser'), (243.68619, 'obelisk'), (238.92957, 'fountain'), (226.73683, 'pedestal, plinth, footstall'), (226.11952, 'Great Pyrenees')] - angle=-4.0 - [(287.7449, 'geyser'), (255.25323, 'fountain'), (236.84944, 'obelisk'), (223.02913, 'Great Pyrenees'), (222.80464, 'church, church building')] - angle=-2.0 - [(267.63528, 'geyser'), (251.48958, 'fountain'), (214.64241, 'obelisk'), (214.56227, 'mobile home, manufactured home'), (213.12424, 'flagpole, flagstaff')] - angle=0.0 - [(258.10968, 'geyser'), (232.90251, 'fountain'), (225.50691, 'lakeside, lakeshore'), (212.03664, 'park bench'), (205.84174, 'Samoyed, Samoyede')] - angle=2.0 - [(222.74826, 'geyser'), (213.38457, 'fountain'), (212.24376, 'obelisk'), (198.3714, 'beacon, lighthouse, beacon light, pharos'), (197.43805, 'picket fence, paling')] - angle=4.0 - [(221.34749, 'geyser'), (209.60362, 'fountain'), (207.0692, 'American egret, great white heron, Egretta albus'), (201.63098, 'obelisk'), (198.75673, 'Great Pyrenees')] - angle=6.0 - [(230.98735, 'American egret, great white heron, Egretta albus'), (216.6342, 'fountain'), (212.73236, 'groom, bridegroom'), (209.60934, 'flagpole, flagstaff'), (209.46207, 'swimming trunks, bathing trunks')] - angle=8.0 - [(253.32706, 'American egret, great white heron, Egretta albus'), (222.6997, 'golf ball'), (222.50499, 'groom, bridegroom'), (222.36351, 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita'), (217.73135, 'swimming trunks, bathing trunks')] - angle=10.0 - [(244.3011, 'solar dish, solar collector, solar furnace'), (239.57332, 'flagpole, flagstaff'), (234.92139, 'picket fence, paling'), (230.62114, 
'car mirror'), (221.8794, 'screen, CRT screen')] + angle=-12.0 - [(247.06139, 'obelisk'), (238.9538, 'car mirror'), (235.27649, 'flagpole, flagstaff'), (231.5171, 'window screen'), (230.90662, 'picket fence, paling')] + angle=-10.0 - [(254.24677, 'car mirror'), (251.51357, 'obelisk'), (235.10507, 'groom, bridegroom'), (234.52951, 'picket fence, paling'), (232.13913, 'church, church building')] + angle=-8.0 - [(235.5695, 'obelisk'), (226.59703, 'car mirror'), (226.46768, 'picket fence, paling'), (221.46794, 'groom, bridegroom'), (220.88501, 'fountain')] + angle=-6.0 - [(265.508, 'geyser'), (243.68616, 'obelisk'), (238.9296, 'fountain'), (226.73679, 'pedestal, plinth, footstall'), (226.11945, 'Great Pyrenees')] + angle=-4.0 - [(287.74472, 'geyser'), (255.25317, 'fountain'), (236.84944, 'obelisk'), (223.02904, 'Great Pyrenees'), (222.80466, 'church, church building')] + angle=-2.0 - [(267.6353, 'geyser'), (251.4896, 'fountain'), (214.64238, 'obelisk'), (214.56232, 'mobile home, manufactured home'), (213.12415, 'flagpole, flagstaff')] + angle=0.0 - [(258.10965, 'geyser'), (232.90251, 'fountain'), (225.50687, 'lakeside, lakeshore'), (212.0366, 'park bench'), (205.84172, 'Samoyed, Samoyede')] + angle=2.0 - [(222.74826, 'geyser'), (213.38455, 'fountain'), (212.24373, 'obelisk'), (198.37132, 'beacon, lighthouse, beacon light, pharos'), (197.4381, 'picket fence, paling')] + angle=4.0 - [(221.34743, 'geyser'), (209.60362, 'fountain'), (207.06918, 'American egret, great white heron, Egretta albus'), (201.63097, 'obelisk'), (198.7567, 'Great Pyrenees')] + angle=6.0 - [(230.9874, 'American egret, great white heron, Egretta albus'), (216.63417, 'fountain'), (212.73239, 'groom, bridegroom'), (209.60934, 'flagpole, flagstaff'), (209.46214, 'swimming trunks, bathing trunks')] + angle=8.0 - [(253.32697, 'American egret, great white heron, Egretta albus'), (222.6997, 'golf ball'), (222.50497, 'groom, bridegroom'), (222.36351, 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita'), (217.73135, 'swimming trunks, bathing trunks')] + angle=10.0 - [(244.30106, 'solar dish, solar collector, solar furnace'), (239.57333, 'flagpole, flagstaff'), (234.92139, 'picket fence, paling'), (230.62112, 'car mirror'), (221.87936, 'screen, CRT screen')] - array([[, , , ], - [, , , ], - [, , , ]], - dtype=object) + array([[, , , + ], + [, , , + ], + [, , , + ]], dtype=object) -.. GENERATED FROM PYTHON SOURCE LINES 157-163 +.. GENERATED FROM PYTHON SOURCE LINES 154-160 Transfer learning in a pipeline +++++++++++++++++++++++++++++++ @@ -326,7 +311,7 @@ The proposed transfer learning consists using a PCA to projet the probabilities on a graph. -.. GENERATED FROM PYTHON SOURCE LINES 163-181 +.. GENERATED FROM PYTHON SOURCE LINES 160-178 .. code-block:: default @@ -354,32 +339,30 @@ on a graph. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [[-676.57574 -203.35437 ] - [-570.6653 -208.0976 ] - [-339.81192 -86.34015 ] - [ -14.556089 -168.44864 ] - [ 357.22345 -157.61432 ] - [ 596.3862 -90.20996 ] - [ 918.8612 -26.33939 ] - [ 499.87164 128.27281 ] - [ 306.68573 156.42908 ] - [-125.91209 119.21805 ] - [-446.6046 342.45844 ] - [-504.90253 194.02596 ]] + [[-676.5764 -203.35454 ] + [-570.6655 -208.09769 ] + [-339.81168 -86.33986 ] + [ -14.555651 -168.44836 ] + [ 357.22372 -157.61395 ] + [ 596.38617 -90.210175] + [ 918.8613 -26.339687] + [ 499.87177 128.2728 ] + [ 306.68564 156.42897 ] + [-125.91207 119.218216] + [-446.60468 342.45862 ] + [-504.90256 194.02576 ]] -.. GENERATED FROM PYTHON SOURCE LINES 182-184 +.. 
GENERATED FROM PYTHON SOURCE LINES 179-181 Graph for the PCA ----------------- -.. GENERATED FROM PYTHON SOURCE LINES 184-196 +.. GENERATED FROM PYTHON SOURCE LINES 181-193 .. code-block:: default @@ -407,7 +390,7 @@ Graph for the PCA -.. GENERATED FROM PYTHON SOURCE LINES 197-203 +.. GENERATED FROM PYTHON SOURCE LINES 194-200 Remove one layer at the end --------------------------- @@ -416,7 +399,7 @@ The last is often removed before the model is inserted in a pipeline. Let's see how to do that. First, we need the list of output for every node. -.. GENERATED FROM PYTHON SOURCE LINES 203-211 +.. GENERATED FROM PYTHON SOURCE LINES 200-208 .. code-block:: default @@ -434,8 +417,6 @@ First, we need the list of output for every node. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none squeezenet0_conv0_fwd ['squeezenet0_conv0_fwd'] @@ -508,11 +489,11 @@ First, we need the list of output for every node. -.. GENERATED FROM PYTHON SOURCE LINES 212-213 +.. GENERATED FROM PYTHON SOURCE LINES 209-210 We select one of the last one. -.. GENERATED FROM PYTHON SOURCE LINES 213-217 +.. GENERATED FROM PYTHON SOURCE LINES 210-214 .. code-block:: default @@ -526,8 +507,6 @@ We select one of the last one. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none selected squeezenet0_relu25_fwd @@ -535,13 +514,13 @@ We select one of the last one. -.. GENERATED FROM PYTHON SOURCE LINES 218-221 +.. GENERATED FROM PYTHON SOURCE LINES 215-218 And we tell *OnnxTransformer* to use that specific one and to flatten the output as the dimension is not a matrix. -.. GENERATED FROM PYTHON SOURCE LINES 221-235 +.. GENERATED FROM PYTHON SOURCE LINES 218-232 .. code-block:: default @@ -566,7 +545,7 @@ as the dimension is not a matrix. -.. GENERATED FROM PYTHON SOURCE LINES 236-241 +.. GENERATED FROM PYTHON SOURCE LINES 233-238 We check that it is different. The following values are the shape of the @@ -574,7 +553,7 @@ PCA components. The number of column is the number of dimensions of the outputs of the transfered neural network. -.. GENERATED FROM PYTHON SOURCE LINES 241-245 +.. GENERATED FROM PYTHON SOURCE LINES 238-242 .. code-block:: default @@ -588,8 +567,6 @@ neural network. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none (2, 1000) (2, 169000) @@ -597,11 +574,11 @@ neural network. -.. GENERATED FROM PYTHON SOURCE LINES 246-247 +.. GENERATED FROM PYTHON SOURCE LINES 243-244 Graph again. -.. GENERATED FROM PYTHON SOURCE LINES 247-260 +.. GENERATED FROM PYTHON SOURCE LINES 244-257 .. code-block:: default @@ -633,35 +610,23 @@ Graph again. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 5.219 seconds) + **Total running time of the script:** ( 0 minutes 2.727 seconds) .. _sphx_glr_download_auto_tutorial_plot_gbegin_transfer_learning.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_gbegin_transfer_learning.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_gbegin_transfer_learning.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_gbegin_transfer_learning.py ` - .. 
container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_gbegin_transfer_learning.ipynb ` + :download:`Download Jupyter notebook: plot_gbegin_transfer_learning.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_gconverting.rst.txt b/_sources/auto_tutorial/plot_gconverting.rst.txt index f4686bd5f..49267ef21 100644 --- a/_sources/auto_tutorial/plot_gconverting.rst.txt +++ b/_sources/auto_tutorial/plot_gconverting.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_gconverting.py" +.. "auto_tutorial/plot_gconverting.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -24,13 +24,10 @@ Modify the ONNX graph This example shows how to change the default ONNX graph such as renaming the inputs or outputs names. -.. contents:: - :local: - Basic example +++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 17-43 +.. GENERATED FROM PYTHON SOURCE LINES 14-41 .. code-block:: default @@ -51,7 +48,8 @@ Basic example clr.fit(X_train, y_train) - onx = to_onnx(clr, X, options={'zipmap': False}) + onx = to_onnx(clr, X, options={'zipmap': False}, + target_opset=15) sess = InferenceSession(onx.SerializeToString()) input_names = [i.name for i in sess.get_inputs()] @@ -66,18 +64,16 @@ Basic example .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none inputs=['X'], outputs=['label', 'probabilities'] - [array([0, 2], dtype=int64), array([[8.6287963e-01, 1.3707860e-01, 4.1689775e-05], - [5.3355563e-04, 3.1686544e-01, 6.8260098e-01]], dtype=float32)] + [array([2, 1], dtype=int64), array([[0.00081508, 0.23934081, 0.75984406], + [0.01008135, 0.7703535 , 0.21956511]], dtype=float32)] -.. GENERATED FROM PYTHON SOURCE LINES 44-50 +.. GENERATED FROM PYTHON SOURCE LINES 42-48 Changes the input names +++++++++++++++++++++++ @@ -86,13 +82,14 @@ It is possible to change the input name by using the parameter *initial_types*. However, the user must specify the input types as well. -.. GENERATED FROM PYTHON SOURCE LINES 50-61 +.. GENERATED FROM PYTHON SOURCE LINES 48-60 .. code-block:: default onx = to_onnx(clr, X, options={'zipmap': False}, - initial_types=[('X56', FloatTensorType([None, X.shape[1]]))]) + initial_types=[('X56', FloatTensorType([None, X.shape[1]]))], + target_opset=15) sess = InferenceSession(onx.SerializeToString()) input_names = [i.name for i in sess.get_inputs()] @@ -107,18 +104,16 @@ types as well. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none inputs=['X56'], outputs=['label', 'probabilities'] - [array([0, 2], dtype=int64), array([[8.6287963e-01, 1.3707860e-01, 4.1689775e-05], - [5.3355563e-04, 3.1686544e-01, 6.8260098e-01]], dtype=float32)] + [array([2, 1], dtype=int64), array([[0.00081508, 0.23934081, 0.75984406], + [0.01008135, 0.7703535 , 0.21956511]], dtype=float32)] -.. GENERATED FROM PYTHON SOURCE LINES 62-67 +.. GENERATED FROM PYTHON SOURCE LINES 61-66 Changes the output names ++++++++++++++++++++++++ @@ -126,14 +121,15 @@ Changes the output names It is possible to change the input name by using the parameter *final_types*. -.. GENERATED FROM PYTHON SOURCE LINES 67-78 +.. 
GENERATED FROM PYTHON SOURCE LINES 66-78 .. code-block:: default onx = to_onnx(clr, X, options={'zipmap': False}, final_types=[('L', Int64TensorType([None])), - ('P', FloatTensorType([None, 3]))]) + ('P', FloatTensorType([None, 3]))], + target_opset=15) sess = InferenceSession(onx.SerializeToString()) input_names = [i.name for i in sess.get_inputs()] @@ -147,13 +143,11 @@ parameter *final_types*. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none inputs=['X'], outputs=['L', 'P'] - [array([0, 2], dtype=int64), array([[8.6287963e-01, 1.3707860e-01, 4.1689775e-05], - [5.3355563e-04, 3.1686544e-01, 6.8260098e-01]], dtype=float32)] + [array([2, 1], dtype=int64), array([[0.00081508, 0.23934081, 0.75984406], + [0.01008135, 0.7703535 , 0.21956511]], dtype=float32)] @@ -182,7 +176,7 @@ to unique names. It does not impact the graph inputs or outputs. onx = to_onnx(clr, X, options={'zipmap': False}, - naming=rename_results) + naming=rename_results, target_opset=15) sess = InferenceSession(onx.SerializeToString()) input_names = [i.name for i in sess.get_inputs()] @@ -195,8 +189,6 @@ to unique names. It does not impact the graph inputs or outputs. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none changed 'SklearnLinearClassifier' into '_SKLEARNLINEARCLASSIFIER'. @@ -206,8 +198,8 @@ to unique names. It does not impact the graph inputs or outputs. changed 'probability_tensor' into '_PROBABILITY_TENSOR'. changed 'Normalizer' into '_NORMALIZER'. inputs=['X'], outputs=['label', 'probabilities'] - [array([0, 2], dtype=int64), array([[8.6287963e-01, 1.3707860e-01, 4.1689775e-05], - [5.3355563e-04, 3.1686544e-01, 6.8260098e-01]], dtype=float32)] + [array([2, 1], dtype=int64), array([[0.00081508, 0.23934081, 0.75984406], + [0.01008135, 0.7703535 , 0.21956511]], dtype=float32)] @@ -215,35 +207,23 @@ to unique names. It does not impact the graph inputs or outputs. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.103 seconds) + **Total running time of the script:** ( 0 minutes 0.063 seconds) .. _sphx_glr_download_auto_tutorial_plot_gconverting.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_gconverting.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_gconverting.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_gconverting.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_gconverting.ipynb ` + :download:`Download Jupyter notebook: plot_gconverting.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_gexternal_catboost.rst.txt b/_sources/auto_tutorial/plot_gexternal_catboost.rst.txt new file mode 100644 index 000000000..673f48fe9 --- /dev/null +++ b/_sources/auto_tutorial/plot_gexternal_catboost.rst.txt @@ -0,0 +1,320 @@ + +.. DO NOT EDIT. +.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. +.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: +.. "auto_tutorial/plot_gexternal_catboost.py" +.. LINE NUMBERS ARE GIVEN BELOW. + +.. only:: html + + .. 
note:: + :class: sphx-glr-download-link-note + + Click :ref:`here ` + to download the full example code + +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_auto_tutorial_plot_gexternal_catboost.py: + + +.. _example-catboost: + +Convert a pipeline with a CatBoost classifier +============================================= + +.. index:: CatBoost + +:epkg:`sklearn-onnx` only converts :epkg:`scikit-learn` models into *ONNX* +but many libraries implement :epkg:`scikit-learn` API so that their models +can be included in a :epkg:`scikit-learn` pipeline. This example considers +a pipeline including a :epkg:`CatBoost` model. :epkg:`sklearn-onnx` can convert +the whole pipeline as long as it knows the converter associated to +a *CatBoostClassifier*. Let's see how to do it. + +Train a CatBoostClassifier +++++++++++++++++++++++++++ + +.. GENERATED FROM PYTHON SOURCE LINES 21-49 + +.. code-block:: default + + from pyquickhelper.helpgen.graphviz_helper import plot_graphviz + import numpy + from onnx.helper import get_attribute_value + from sklearn.datasets import load_iris + from sklearn.pipeline import Pipeline + from sklearn.preprocessing import StandardScaler + from mlprodict.onnxrt import OnnxInference + import onnxruntime as rt + from skl2onnx import convert_sklearn, update_registered_converter + from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes # noqa + from skl2onnx.common.data_types import FloatTensorType, Int64TensorType, guess_tensor_type + from skl2onnx._parse import _apply_zipmap, _get_sklearn_operator_name + from catboost import CatBoostClassifier + from catboost.utils import convert_to_onnx_object + + data = load_iris() + X = data.data[:, :2] + y = data.target + + ind = numpy.arange(X.shape[0]) + numpy.random.shuffle(ind) + X = X[ind, :].copy() + y = y[ind].copy() + + pipe = Pipeline([('scaler', StandardScaler()), + ('lgbm', CatBoostClassifier(n_estimators=3))]) + pipe.fit(X, y) + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + Learning rate set to 0.5 + 0: learn: 0.8233591 total: 54.7ms remaining: 109ms + 1: learn: 0.6635820 total: 55.8ms remaining: 27.9ms + 2: learn: 0.5885989 total: 56.6ms remaining: 0us + + +.. raw:: html + +
+    Pipeline(steps=[('scaler', StandardScaler()),
+                    ('lgbm',
+                     <catboost.core.CatBoostClassifier object at 0x7ff3015be440>)])
+ +.. GENERATED FROM PYTHON SOURCE LINES 50-57 + +Register the converter for CatBoostClassifier ++++++++++++++++++++++++++++++++++++++++++++++ + +The model has no converter implemented in sklearn-onnx. +We need to register the one coming from *CatBoost* itself. +However, the converter does not follow sklearn-onnx design and +needs to be wrapped. + +.. GENERATED FROM PYTHON SOURCE LINES 57-120 + +.. code-block:: default + + + + def skl2onnx_parser_castboost_classifier(scope, model, inputs, + custom_parsers=None): + options = scope.get_options(model, dict(zipmap=True)) + no_zipmap = isinstance(options['zipmap'], bool) and not options['zipmap'] + + alias = _get_sklearn_operator_name(type(model)) + this_operator = scope.declare_local_operator(alias, model) + this_operator.inputs = inputs + + label_variable = scope.declare_local_variable('label', Int64TensorType()) + prob_dtype = guess_tensor_type(inputs[0].type) + probability_tensor_variable = scope.declare_local_variable('probabilities', prob_dtype) + this_operator.outputs.append(label_variable) + this_operator.outputs.append(probability_tensor_variable) + probability_tensor = this_operator.outputs + + if no_zipmap: + return probability_tensor + + return _apply_zipmap(options['zipmap'], scope, model, + inputs[0].type, probability_tensor) + + + def skl2onnx_convert_catboost(scope, operator, container): + """ + CatBoost returns an ONNX graph with a single node. + This function adds it to the main graph. + """ + onx = convert_to_onnx_object(operator.raw_operator) + opsets = {d.domain: d.version for d in onx.opset_import} + if '' in opsets and opsets[''] >= container.target_opset: + raise RuntimeError( + "CatBoost uses an opset more recent than the target one.") + if len(onx.graph.initializer) > 0 or len(onx.graph.sparse_initializer) > 0: + raise NotImplementedError( + "CatBoost returns a model initializers. This option is not implemented yet.") + if (len(onx.graph.node) not in (1, 2) or not onx.graph.node[0].op_type.startswith("TreeEnsemble") or + (len(onx.graph.node) == 2 and onx.graph.node[1].op_type != "ZipMap")): + types = ", ".join(map(lambda n: n.op_type, onx.graph.node)) + raise NotImplementedError( + f"CatBoost returns {len(onx.graph.node)} != 1 (types={types}). " + f"This option is not implemented yet.") + node = onx.graph.node[0] + atts = {} + for att in node.attribute: + atts[att.name] = get_attribute_value(att) + container.add_node( + node.op_type, [operator.inputs[0].full_name], + [operator.outputs[0].full_name, operator.outputs[1].full_name], + op_domain=node.domain, op_version=opsets.get(node.domain, None), + **atts) + + + update_registered_converter( + CatBoostClassifier, + 'CatBoostCatBoostClassifier', + calculate_linear_classifier_output_shapes, + skl2onnx_convert_catboost, + parser=skl2onnx_parser_castboost_classifier, + options={'nocl': [True, False], 'zipmap': [True, False, 'columns']}) + + + + + + + + +.. GENERATED FROM PYTHON SOURCE LINES 121-123 + +Convert ++++++++ + +.. GENERATED FROM PYTHON SOURCE LINES 123-133 + +.. code-block:: default + + + model_onnx = convert_sklearn( + pipe, 'pipeline_catboost', + [('input', FloatTensorType([None, 2]))], + target_opset={'': 12, 'ai.onnx.ml': 2}) + + # And save. + with open("pipeline_catboost.onnx", "wb") as f: + f.write(model_onnx.SerializeToString()) + + + + + + + + +.. GENERATED FROM PYTHON SOURCE LINES 134-138 + +Compare the predictions ++++++++++++++++++++++++ + +Predictions with CatBoost. + +.. GENERATED FROM PYTHON SOURCE LINES 138-142 + +.. 
code-block:: default + + + print("predict", pipe.predict(X[:5])) + print("predict_proba", pipe.predict_proba(X[:1])) + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + predict [[2] + [0] + [1] + [1] + [0]] + predict_proba [[0.09983726 0.22940648 0.67075626]] + + + + +.. GENERATED FROM PYTHON SOURCE LINES 143-144 + +Predictions with onnxruntime. + +.. GENERATED FROM PYTHON SOURCE LINES 144-151 + +.. code-block:: default + + + sess = rt.InferenceSession("pipeline_catboost.onnx") + + pred_onx = sess.run(None, {"input": X[:5].astype(numpy.float32)}) + print("predict", pred_onx[0]) + print("predict_proba", pred_onx[1][:1]) + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + predict [2 0 1 1 0] + predict_proba [{0: 0.09983726590871811, 1: 0.22940650582313538, 2: 0.6707562804222107}] + + + + +.. GENERATED FROM PYTHON SOURCE LINES 152-154 + +Final graph ++++++++++++ + +.. GENERATED FROM PYTHON SOURCE LINES 154-159 + +.. code-block:: default + + + oinf = OnnxInference(model_onnx) + ax = plot_graphviz(oinf.to_dot()) + ax.get_xaxis().set_visible(False) + ax.get_yaxis().set_visible(False) + + + +.. image-sg:: /auto_tutorial/images/sphx_glr_plot_gexternal_catboost_001.png + :alt: plot gexternal catboost + :srcset: /auto_tutorial/images/sphx_glr_plot_gexternal_catboost_001.png + :class: sphx-glr-single-img + + + + + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 1.134 seconds) + + +.. _sphx_glr_download_auto_tutorial_plot_gexternal_catboost.py: + +.. only:: html + + .. container:: sphx-glr-footer sphx-glr-footer-example + + + .. container:: sphx-glr-download sphx-glr-download-python + + :download:`Download Python source code: plot_gexternal_catboost.py ` + + .. container:: sphx-glr-download sphx-glr-download-jupyter + + :download:`Download Jupyter notebook: plot_gexternal_catboost.ipynb ` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/_sources/auto_tutorial/plot_gexternal_lightgbm.rst.txt b/_sources/auto_tutorial/plot_gexternal_lightgbm.rst.txt index 93e37e4d8..e3f424aee 100644 --- a/_sources/auto_tutorial/plot_gexternal_lightgbm.rst.txt +++ b/_sources/auto_tutorial/plot_gexternal_lightgbm.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_gexternal_lightgbm.py" +.. "auto_tutorial/plot_gexternal_lightgbm.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -32,13 +32,10 @@ a pipeline including a :epkg:`LightGBM` model. :epkg:`sklearn-onnx` can convert the whole pipeline as long as it knows the converter associated to a *LGBMClassifier*. Let's see how to do it. -.. contents:: - :local: - Train a LightGBM classifier +++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 25-51 +.. GENERATED FROM PYTHON SOURCE LINES 22-48 .. code-block:: default @@ -76,14 +73,14 @@ Train a LightGBM classifier .. raw:: html
-    Pipeline(steps=[('scaler', StandardScaler()),
-                    ('lgbm', LGBMClassifier(n_estimators=3))])
+    Pipeline(steps=[('scaler', StandardScaler()),
+                    ('lgbm', LGBMClassifier(n_estimators=3))])
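The next hunks renumber the section that registers the converter for *LGBMClassifier*; the registration code itself does not appear in this diff because it is unchanged context. As a reference only, a minimal sketch of the usual pattern follows; it assumes the converter shipped with *onnxmltools* and reuses the generic classifier shape calculator from *skl2onnx* (the same one the CatBoost example above imports).

.. code-block:: python

    from lightgbm import LGBMClassifier
    from skl2onnx import update_registered_converter
    from skl2onnx.common.shape_calculator import (
        calculate_linear_classifier_output_shapes)
    # assumption: this import path matches the installed onnxmltools version
    from onnxmltools.convert.lightgbm.operator_converters.LightGbm import (
        convert_lightgbm)

    # tell skl2onnx which shape calculator and converter handle LGBMClassifier
    update_registered_converter(
        LGBMClassifier, 'LightGbmLGBMClassifier',
        calculate_linear_classifier_output_shapes, convert_lightgbm,
        options={'nocl': [True, False], 'zipmap': [True, False]})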
-.. GENERATED FROM PYTHON SOURCE LINES 52-63 +.. GENERATED FROM PYTHON SOURCE LINES 49-60 Register the converter for LGBMClassifier +++++++++++++++++++++++++++++++++++++++++ @@ -97,7 +94,7 @@ and the shape calculator: `_. -.. GENERATED FROM PYTHON SOURCE LINES 63-69 +.. GENERATED FROM PYTHON SOURCE LINES 60-66 .. code-block:: default @@ -114,12 +111,12 @@ lightgbm/shape_calculators/Classifier.py>`_. -.. GENERATED FROM PYTHON SOURCE LINES 70-72 +.. GENERATED FROM PYTHON SOURCE LINES 67-69 Convert again +++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 72-82 +.. GENERATED FROM PYTHON SOURCE LINES 69-79 .. code-block:: default @@ -140,14 +137,14 @@ Convert again -.. GENERATED FROM PYTHON SOURCE LINES 83-87 +.. GENERATED FROM PYTHON SOURCE LINES 80-84 Compare the predictions +++++++++++++++++++++++ Predictions with LightGbm. -.. GENERATED FROM PYTHON SOURCE LINES 87-91 +.. GENERATED FROM PYTHON SOURCE LINES 84-88 .. code-block:: default @@ -161,21 +158,19 @@ Predictions with LightGbm. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [1 2 0 0 0] - predict_proba [[0.25335584 0.45934348 0.28730068]] + predict [2 1 1 1 2] + predict_proba [[0.27391998 0.27510369 0.45097633]] -.. GENERATED FROM PYTHON SOURCE LINES 92-93 +.. GENERATED FROM PYTHON SOURCE LINES 89-90 Predictions with onnxruntime. -.. GENERATED FROM PYTHON SOURCE LINES 93-100 +.. GENERATED FROM PYTHON SOURCE LINES 90-97 .. code-block:: default @@ -192,22 +187,20 @@ Predictions with onnxruntime. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [1 2 0 0 0] - predict_proba [{0: 0.25335583090782166, 1: 0.45934349298477173, 2: 0.287300705909729}] + predict [2 1 1 1 2] + predict_proba [{0: 0.2739199697971344, 1: 0.27510371804237366, 2: 0.4509763717651367}] -.. GENERATED FROM PYTHON SOURCE LINES 101-103 +.. GENERATED FROM PYTHON SOURCE LINES 98-100 Final graph +++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 103-109 +.. GENERATED FROM PYTHON SOURCE LINES 100-106 .. code-block:: default @@ -232,35 +225,23 @@ Final graph .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.510 seconds) + **Total running time of the script:** ( 0 minutes 0.542 seconds) .. _sphx_glr_download_auto_tutorial_plot_gexternal_lightgbm.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_gexternal_lightgbm.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_gexternal_lightgbm.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_gexternal_lightgbm.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_gexternal_lightgbm.ipynb ` + :download:`Download Jupyter notebook: plot_gexternal_lightgbm.ipynb ` .. 
only:: html diff --git a/_sources/auto_tutorial/plot_gexternal_lightgbm_reg.rst.txt b/_sources/auto_tutorial/plot_gexternal_lightgbm_reg.rst.txt index 09eadaa5f..fc22bb905 100644 --- a/_sources/auto_tutorial/plot_gexternal_lightgbm_reg.rst.txt +++ b/_sources/auto_tutorial/plot_gexternal_lightgbm_reg.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_gexternal_lightgbm_reg.py" +.. "auto_tutorial/plot_gexternal_lightgbm_reg.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -48,13 +48,10 @@ the discrepancies then become :math:`D'(x) = |\sum_{k=1}^a \left[\sum\right]_{i=1}^{F/a} float(T_{ak + i}(x)) - \sum_{i=1}^F T_i(x)|`. -.. contents:: - :local: - Train a LGBMRegressor +++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 41-64 +.. GENERATED FROM PYTHON SOURCE LINES 38-61 .. code-block:: default @@ -89,12 +86,12 @@ Train a LGBMRegressor .. raw:: html
-    LGBMRegressor(n_estimators=1000)
+    LGBMRegressor(n_estimators=1000)
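The two conversions compared below (one single *TreeEnsembleRegressor* node versus several smaller ones) are elided from this diff. A minimal sketch under stated assumptions: ``reg`` stands for the fitted ``LGBMRegressor`` and ``X`` for the training features from the elided block above, and the registered converter is assumed to expose the number of trees per node through an option named ``split``.

.. code-block:: python

    import numpy
    from skl2onnx import to_onnx

    X32 = X.astype(numpy.float32)

    # one single TreeEnsembleRegressor summing 1000 trees in float32
    onx = to_onnx(reg, X32[:1], target_opset=15)

    # several TreeEnsembleRegressor nodes of 100 trees each whose outputs
    # are then summed, which reduces the float32 rounding error
    onx_split = to_onnx(reg, X32[:1], target_opset=15,
                        options={'split': 100})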
-.. GENERATED FROM PYTHON SOURCE LINES 65-76 +.. GENERATED FROM PYTHON SOURCE LINES 62-73 Register the converter for LGBMClassifier +++++++++++++++++++++++++++++++++++++++++ @@ -108,7 +105,7 @@ and the shape calculator: `_. -.. GENERATED FROM PYTHON SOURCE LINES 76-97 +.. GENERATED FROM PYTHON SOURCE LINES 73-94 .. code-block:: default @@ -140,7 +137,7 @@ lightgbm/shape_calculators/Regressor.py>`_. -.. GENERATED FROM PYTHON SOURCE LINES 98-104 +.. GENERATED FROM PYTHON SOURCE LINES 95-101 Convert +++++++ @@ -149,7 +146,7 @@ We convert the same model following the two scenarios, one single TreeEnsembleRegressor node, or more. *split* parameter is the number of trees per node TreeEnsembleRegressor. -.. GENERATED FROM PYTHON SOURCE LINES 104-111 +.. GENERATED FROM PYTHON SOURCE LINES 101-108 .. code-block:: default @@ -167,12 +164,12 @@ trees per node TreeEnsembleRegressor. -.. GENERATED FROM PYTHON SOURCE LINES 112-114 +.. GENERATED FROM PYTHON SOURCE LINES 109-111 Discrepancies +++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 114-130 +.. GENERATED FROM PYTHON SOURCE LINES 111-127 .. code-block:: default @@ -198,22 +195,20 @@ Discrepancies .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - sum of discrepancies 1 node 0.00011677806003327501 - sum of discrepancies split node 4.402241789117978e-05 ratio: 2.6526952772549177 + sum of discrepancies 1 node 0.0002170055282193992 + sum of discrepancies split node 4.3252795611109084e-05 ratio: 5.01714456033134 -.. GENERATED FROM PYTHON SOURCE LINES 131-133 +.. GENERATED FROM PYTHON SOURCE LINES 128-130 The sum of the discrepancies were reduced 4, 5 times. The maximum is much better too. -.. GENERATED FROM PYTHON SOURCE LINES 133-140 +.. GENERATED FROM PYTHON SOURCE LINES 130-137 .. code-block:: default @@ -230,34 +225,32 @@ The maximum is much better too. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - max discrepancies 1 node 1.0208151959290035e-06 - max discrepancies split node 3.5547470744745624e-07 ratio: 2.8716957199546838 + max discrepancies 1 node 1.7663594098493718e-06 + max discrepancies split node 3.081887920419746e-07 ratio: 5.731420010915899 -.. GENERATED FROM PYTHON SOURCE LINES 141-145 +.. GENERATED FROM PYTHON SOURCE LINES 138-142 Processing time +++++++++++++++ The processing time is slower but not much. -.. GENERATED FROM PYTHON SOURCE LINES 145-153 +.. GENERATED FROM PYTHON SOURCE LINES 142-150 .. code-block:: default print("processing time no split", timeit.timeit( - lambda: sess.run(None, {'X': X32})[0], number=150)) + lambda: sess.run(None, {'X': X32})[0], number=150)) print("processing time split", timeit.timeit( - lambda: sess_split.run(None, {'X': X32})[0], number=150)) + lambda: sess_split.run(None, {'X': X32})[0], number=150)) @@ -265,17 +258,15 @@ The processing time is slower but not much. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - processing time no split 0.7822519999999997 - processing time split 0.8093972000000065 + processing time no split 2.637800576000245 + processing time split 2.7089149429998542 -.. GENERATED FROM PYTHON SOURCE LINES 154-159 +.. GENERATED FROM PYTHON SOURCE LINES 151-156 Split influence +++++++++++++++ @@ -283,7 +274,7 @@ Split influence Let's see how the sum of the discrepancies moves against the parameter *split*. -.. GENERATED FROM PYTHON SOURCE LINES 159-174 +.. GENERATED FROM PYTHON SOURCE LINES 156-171 .. code-block:: default @@ -308,40 +299,39 @@ the parameter *split*. .. rst-class:: sphx-glr-script-out - Out: - .. 
code-block:: none - 0%| | 0/12 [00:00` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_gexternal_lightgbm_reg.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_gexternal_lightgbm_reg.ipynb ` + :download:`Download Jupyter notebook: plot_gexternal_lightgbm_reg.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_gexternal_xgboost.rst.txt b/_sources/auto_tutorial/plot_gexternal_xgboost.rst.txt index 3e01aa2d2..39668ed84 100644 --- a/_sources/auto_tutorial/plot_gexternal_xgboost.rst.txt +++ b/_sources/auto_tutorial/plot_gexternal_xgboost.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_gexternal_xgboost.py" +.. "auto_tutorial/plot_gexternal_xgboost.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -33,13 +33,10 @@ model. :epkg:`sklearn-onnx` can convert the whole pipeline as long as it knows the converter associated to a *XGBClassifier*. Let's see how to do it. -.. contents:: - :local: - Train a XGBoost classifier ++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 26-75 +.. GENERATED FROM PYTHON SOURCE LINES 23-72 .. code-block:: default @@ -96,20 +93,10 @@ Train a XGBoost classifier -.. rst-class:: sphx-glr-script-out - - Out: - - .. code-block:: none - - D:\Program Files\Python\Python39\lib\site-packages\xgboost\sklearn.py:1224: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1]. - warnings.warn(label_encoder_deprecation_msg, UserWarning) - [14:44:07] WARNING: C:/Users/Administrator/workspace/xgboost-win64_release_1.5.1/src/learner.cc:1115: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'multi:softprob' was changed from 'merror' to 'mlogloss'. Explicitly set eval_metric if you'd like to restore the old behavior. - -.. GENERATED FROM PYTHON SOURCE LINES 76-87 +.. GENERATED FROM PYTHON SOURCE LINES 73-84 Register the converter for XGBClassifier ++++++++++++++++++++++++++++++++++++++++ @@ -123,7 +110,7 @@ and the shape calculator: `_. -.. GENERATED FROM PYTHON SOURCE LINES 87-93 +.. GENERATED FROM PYTHON SOURCE LINES 84-90 .. code-block:: default @@ -140,12 +127,12 @@ xgboost/shape_calculators/Classifier.py>`_. -.. GENERATED FROM PYTHON SOURCE LINES 94-96 +.. GENERATED FROM PYTHON SOURCE LINES 91-93 Convert again +++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 96-106 +.. GENERATED FROM PYTHON SOURCE LINES 93-103 .. code-block:: default @@ -166,14 +153,14 @@ Convert again -.. GENERATED FROM PYTHON SOURCE LINES 107-111 +.. GENERATED FROM PYTHON SOURCE LINES 104-108 Compare the predictions +++++++++++++++++++++++ Predictions with XGBoost. -.. GENERATED FROM PYTHON SOURCE LINES 111-115 +.. GENERATED FROM PYTHON SOURCE LINES 108-112 .. code-block:: default @@ -187,21 +174,19 @@ Predictions with XGBoost. .. 
rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [1 0 2 2 0] - predict_proba [[0.18270978 0.63138163 0.18590865]] + predict [1 1 1 2 1] + predict_proba [[0.1758379 0.43438542 0.3897767 ]] -.. GENERATED FROM PYTHON SOURCE LINES 116-117 +.. GENERATED FROM PYTHON SOURCE LINES 113-114 Predictions with onnxruntime. -.. GENERATED FROM PYTHON SOURCE LINES 117-123 +.. GENERATED FROM PYTHON SOURCE LINES 114-120 .. code-block:: default @@ -217,22 +202,20 @@ Predictions with onnxruntime. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [1 0 2 2 0] - predict_proba [{0: 0.18270978331565857, 1: 0.631381630897522, 2: 0.18590864539146423}] + predict [1 1 1 2 1] + predict_proba [{0: 0.175837904214859, 1: 0.43438541889190674, 2: 0.38977670669555664}] -.. GENERATED FROM PYTHON SOURCE LINES 124-126 +.. GENERATED FROM PYTHON SOURCE LINES 121-123 Final graph +++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 126-134 +.. GENERATED FROM PYTHON SOURCE LINES 123-131 .. code-block:: default @@ -256,12 +239,12 @@ Final graph -.. GENERATED FROM PYTHON SOURCE LINES 135-137 +.. GENERATED FROM PYTHON SOURCE LINES 132-134 Same example with XGBRegressor ++++++++++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 137-154 +.. GENERATED FROM PYTHON SOURCE LINES 134-151 .. code-block:: default @@ -288,20 +271,18 @@ Same example with XGBRegressor .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [142.53963 62.165913 77.709404 58.369633 108.54605 ] + predict [73.99487 44.04601 77.94355 75.82603 44.04601] -.. GENERATED FROM PYTHON SOURCE LINES 155-156 +.. GENERATED FROM PYTHON SOURCE LINES 152-153 ONNX -.. GENERATED FROM PYTHON SOURCE LINES 156-164 +.. GENERATED FROM PYTHON SOURCE LINES 153-161 .. code-block:: default @@ -319,21 +300,19 @@ ONNX .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - predict [142.53963 62.165913 77.709404 58.369633 108.54605 ] + predict [73.99487 44.04601 77.94355 75.82603 44.04601] -.. GENERATED FROM PYTHON SOURCE LINES 165-167 +.. GENERATED FROM PYTHON SOURCE LINES 162-164 Some discrepencies may appear. In that case, you should read :ref:`l-example-discrepencies-float-double`. -.. GENERATED FROM PYTHON SOURCE LINES 169-175 +.. GENERATED FROM PYTHON SOURCE LINES 166-172 Same with a Booster +++++++++++++++++++ @@ -342,7 +321,7 @@ A booster cannot be inserted in a pipeline. It requires a different conversion function because it does not follow :epkg:`scikit-learn` API. -.. GENERATED FROM PYTHON SOURCE LINES 175-203 +.. GENERATED FROM PYTHON SOURCE LINES 172-200 .. code-block:: default @@ -380,11 +359,8 @@ follow :epkg:`scikit-learn` API. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - [14:44:08] WARNING: C:/Users/Administrator/workspace/xgboost-win64_release_1.5.1/src/learner.cc:1115: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'multi:softmax' was changed from 'merror' to 'mlogloss'. Explicitly set eval_metric if you'd like to restore the old behavior. [0 0 1 1 0 1 0 1 0 1 0 0 1 1 1 0 0 1 1 1 1 0 0 1 0 0 0 1 1 1 0 1 1 0 1 1 1 0 1 1 1 0 0 1 1 0 0 0 1 0] @@ -394,35 +370,23 @@ follow :epkg:`scikit-learn` API. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.586 seconds) + **Total running time of the script:** ( 0 minutes 0.570 seconds) .. _sphx_glr_download_auto_tutorial_plot_gexternal_xgboost.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. 
container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_gexternal_xgboost.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_gexternal_xgboost.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_gexternal_xgboost.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_gexternal_xgboost.ipynb ` + :download:`Download Jupyter notebook: plot_gexternal_xgboost.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_icustom_converter.rst.txt b/_sources/auto_tutorial/plot_icustom_converter.rst.txt index ed334b3a3..37a150d63 100644 --- a/_sources/auto_tutorial/plot_icustom_converter.rst.txt +++ b/_sources/auto_tutorial/plot_icustom_converter.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_icustom_converter.py" +.. "auto_tutorial/plot_icustom_converter.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -39,9 +39,6 @@ in fact two functions: This example implements both components for a new model. -.. contents:: - :local: - Custom model ++++++++++++ @@ -51,7 +48,7 @@ which decorrelates correlated random variables. If *X* is a matrix of features, :math:`V=\frac{1}{n}X'X` is the covariance matrix. We compute :math:`X V^{1/2}`. -.. GENERATED FROM PYTHON SOURCE LINES 37-127 +.. GENERATED FROM PYTHON SOURCE LINES 34-124 .. code-block:: default @@ -151,8 +148,6 @@ is the covariance matrix. We compute :math:`X V^{1/2}`. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[ 0.0167562 0.52111756 -1.24946737 -0.56194325] @@ -164,11 +159,11 @@ is the covariance matrix. We compute :math:`X V^{1/2}`. -.. GENERATED FROM PYTHON SOURCE LINES 128-129 +.. GENERATED FROM PYTHON SOURCE LINES 125-126 Trained coefficients. -.. GENERATED FROM PYTHON SOURCE LINES 129-133 +.. GENERATED FROM PYTHON SOURCE LINES 126-130 .. code-block:: default @@ -182,8 +177,6 @@ Trained coefficients. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[5.84333333 3.05733333 3.758 1.19933333]] @@ -195,14 +188,14 @@ Trained coefficients. -.. GENERATED FROM PYTHON SOURCE LINES 134-138 +.. GENERATED FROM PYTHON SOURCE LINES 131-135 Conversion into ONNX ++++++++++++++++++++ Let's try to convert it and see what happens. -.. GENERATED FROM PYTHON SOURCE LINES 138-145 +.. GENERATED FROM PYTHON SOURCE LINES 135-142 .. code-block:: default @@ -219,8 +212,6 @@ Let's try to convert it and see what happens. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Unable to find a shape calculator for type ''. @@ -240,7 +231,7 @@ Let's try to convert it and see what happens. -.. GENERATED FROM PYTHON SOURCE LINES 146-154 +.. GENERATED FROM PYTHON SOURCE LINES 143-151 This error means there is no converter associated to *DecorrelateTransformer*. Let's implement it. 
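The bodies of the shape calculator and converter are mostly elided from this diff (only the surrounding line numbers changed). As a hedged reminder of the shape-calculator contract described just below, a minimal sketch could look like the following; the ``coef_`` attribute on the fitted transformer is an assumption made for illustration, not a statement about the tutorial's actual code.

.. code-block:: python

    def decorrelate_transformer_shape_calculator(operator):
        # fitted scikit-learn estimator attached to this ONNX operator
        op = operator.raw_operator
        # keep the input tensor type (float32 or float64)
        input_type = operator.inputs[0].type.__class__
        # keep the batch dimension, usually None, meaning any number of rows
        n_rows = operator.inputs[0].type.shape[0]
        # assumption: the projection matrix is stored in `coef_`;
        # its second dimension gives the number of output columns
        n_cols = op.coef_.shape[1]
        operator.outputs[0].type = input_type([n_rows, n_cols])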
@@ -251,7 +242,7 @@ First the shape calculator. We retrieve the input type add tells the output type has the same type, the same number of rows and a specific number of columns. -.. GENERATED FROM PYTHON SOURCE LINES 154-167 +.. GENERATED FROM PYTHON SOURCE LINES 151-164 .. code-block:: default @@ -275,14 +266,14 @@ the same number of rows and a specific number of columns. -.. GENERATED FROM PYTHON SOURCE LINES 168-172 +.. GENERATED FROM PYTHON SOURCE LINES 165-169 The converter. One thing we need to pay attention to is the target opset. This information is important to make sure that every node is defined following the specifications of that opset. -.. GENERATED FROM PYTHON SOURCE LINES 172-197 +.. GENERATED FROM PYTHON SOURCE LINES 169-194 .. code-block:: default @@ -318,11 +309,11 @@ specifications of that opset. -.. GENERATED FROM PYTHON SOURCE LINES 198-199 +.. GENERATED FROM PYTHON SOURCE LINES 195-196 We need to let *skl2onnx* know about the new converter. -.. GENERATED FROM PYTHON SOURCE LINES 199-224 +.. GENERATED FROM PYTHON SOURCE LINES 196-221 .. code-block:: default @@ -357,20 +348,18 @@ We need to let *skl2onnx* know about the new converter. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (6.046576181972796e-07, 0.0002951417065241126) + (6.04657619085458e-07, 0.0002951417065406967) -.. GENERATED FROM PYTHON SOURCE LINES 225-226 +.. GENERATED FROM PYTHON SOURCE LINES 222-223 Let's check it works as well with double. -.. GENERATED FROM PYTHON SOURCE LINES 226-235 +.. GENERATED FROM PYTHON SOURCE LINES 223-232 .. code-block:: default @@ -389,8 +378,6 @@ Let's check it works as well with double. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none (0.0, 0.0) @@ -398,16 +385,16 @@ Let's check it works as well with double. -.. GENERATED FROM PYTHON SOURCE LINES 236-237 +.. GENERATED FROM PYTHON SOURCE LINES 233-234 The differences are smaller with double as expected. -.. GENERATED FROM PYTHON SOURCE LINES 239-241 +.. GENERATED FROM PYTHON SOURCE LINES 236-238 Final graph +++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 241-246 +.. GENERATED FROM PYTHON SOURCE LINES 238-243 .. code-block:: default @@ -431,35 +418,23 @@ Final graph .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.406 seconds) + **Total running time of the script:** ( 0 minutes 0.321 seconds) .. _sphx_glr_download_auto_tutorial_plot_icustom_converter.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_icustom_converter.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_icustom_converter.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_icustom_converter.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_icustom_converter.ipynb ` + :download:`Download Jupyter notebook: plot_icustom_converter.ipynb ` .. 
only:: html diff --git a/_sources/auto_tutorial/plot_jcustom_syntax.rst.txt b/_sources/auto_tutorial/plot_jcustom_syntax.rst.txt index 02edb7802..4a0ababd7 100644 --- a/_sources/auto_tutorial/plot_jcustom_syntax.rst.txt +++ b/_sources/auto_tutorial/plot_jcustom_syntax.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_jcustom_syntax.py" +.. "auto_tutorial/plot_jcustom_syntax.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -35,17 +35,13 @@ The first way is used in :ref:`l-plot-custom-converter`. This one demonstrates the second way which is usually the one used in other converter library. It is more verbose. -.. contents:: - :local: - - Custom model ++++++++++++ It basically copies what is in example `:ref:`l-plot-custom-converter`. -.. GENERATED FROM PYTHON SOURCE LINES 31-88 +.. GENERATED FROM PYTHON SOURCE LINES 27-84 .. code-block:: default @@ -112,8 +108,6 @@ It basically copies what is in example .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[ 0.0167562 0.52111756 -1.24946737 -0.56194325] @@ -125,14 +119,14 @@ It basically copies what is in example -.. GENERATED FROM PYTHON SOURCE LINES 89-93 +.. GENERATED FROM PYTHON SOURCE LINES 85-89 Conversion into ONNX ++++++++++++++++++++ The shape calculator does not change. -.. GENERATED FROM PYTHON SOURCE LINES 93-105 +.. GENERATED FROM PYTHON SOURCE LINES 89-101 .. code-block:: default @@ -155,11 +149,11 @@ The shape calculator does not change. -.. GENERATED FROM PYTHON SOURCE LINES 106-107 +.. GENERATED FROM PYTHON SOURCE LINES 102-103 The converter is different. -.. GENERATED FROM PYTHON SOURCE LINES 107-146 +.. GENERATED FROM PYTHON SOURCE LINES 103-142 .. code-block:: default @@ -209,11 +203,11 @@ The converter is different. -.. GENERATED FROM PYTHON SOURCE LINES 147-148 +.. GENERATED FROM PYTHON SOURCE LINES 143-144 We need to let *skl2onnx* know about the new converter. -.. GENERATED FROM PYTHON SOURCE LINES 148-172 +.. GENERATED FROM PYTHON SOURCE LINES 144-168 .. code-block:: default @@ -247,20 +241,18 @@ We need to let *skl2onnx* know about the new converter. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (6.046576181972796e-07, 0.0002951417065241126) + (6.04657619085458e-07, 0.0002951417065406967) -.. GENERATED FROM PYTHON SOURCE LINES 173-174 +.. GENERATED FROM PYTHON SOURCE LINES 169-170 Let's check it works as well with double. -.. GENERATED FROM PYTHON SOURCE LINES 174-183 +.. GENERATED FROM PYTHON SOURCE LINES 170-179 .. code-block:: default @@ -279,8 +271,6 @@ Let's check it works as well with double. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none (0.0, 0.0) @@ -288,42 +278,30 @@ Let's check it works as well with double. -.. GENERATED FROM PYTHON SOURCE LINES 184-185 +.. GENERATED FROM PYTHON SOURCE LINES 180-181 The differences are smaller with double as expected. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.078 seconds) + **Total running time of the script:** ( 0 minutes 0.028 seconds) .. _sphx_glr_download_auto_tutorial_plot_jcustom_syntax.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. 
container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_jcustom_syntax.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_jcustom_syntax.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_jcustom_syntax.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_jcustom_syntax.ipynb ` + :download:`Download Jupyter notebook: plot_jcustom_syntax.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_kcustom_converter_wrapper.rst.txt b/_sources/auto_tutorial/plot_kcustom_converter_wrapper.rst.txt index 5a40a07d5..de257858f 100644 --- a/_sources/auto_tutorial/plot_kcustom_converter_wrapper.rst.txt +++ b/_sources/auto_tutorial/plot_kcustom_converter_wrapper.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_kcustom_converter_wrapper.py" +.. "auto_tutorial/plot_kcustom_converter_wrapper.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -33,9 +33,6 @@ can be rewritten by using a `PCA `_. We could then reuse the converter associated to this model. -.. contents:: - :local: - Custom model ++++++++++++ @@ -45,7 +42,7 @@ which decorrelates correlated random variables. If *X* is a matrix of features, :math:`V=\frac{1}{n}X'X` is the covariance matrix. We compute :math:`X V^{1/2}`. -.. GENERATED FROM PYTHON SOURCE LINES 31-102 +.. GENERATED FROM PYTHON SOURCE LINES 28-99 .. code-block:: default @@ -126,8 +123,6 @@ is the covariance matrix. We compute :math:`X V^{1/2}`. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[-2.68412563e+00 3.19397247e-01 -2.79148276e-02 -2.26243707e-03] @@ -139,14 +134,14 @@ is the covariance matrix. We compute :math:`X V^{1/2}`. -.. GENERATED FROM PYTHON SOURCE LINES 103-107 +.. GENERATED FROM PYTHON SOURCE LINES 100-104 Conversion into ONNX ++++++++++++++++++++ Let's try to convert it and see what happens. -.. GENERATED FROM PYTHON SOURCE LINES 107-114 +.. GENERATED FROM PYTHON SOURCE LINES 104-111 .. code-block:: default @@ -163,8 +158,6 @@ Let's try to convert it and see what happens. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Unable to find a shape calculator for type ''. @@ -184,7 +177,7 @@ Let's try to convert it and see what happens. -.. GENERATED FROM PYTHON SOURCE LINES 115-123 +.. GENERATED FROM PYTHON SOURCE LINES 112-120 This error means there is no converter associated to *DecorrelateTransformer*. Let's do it. @@ -195,7 +188,7 @@ First the shape calculator. We retrieve the input type add tells the output type has the same type, the same number of rows and a specific number of columns. -.. GENERATED FROM PYTHON SOURCE LINES 123-133 +.. GENERATED FROM PYTHON SOURCE LINES 120-130 .. code-block:: default @@ -216,14 +209,14 @@ the same number of rows and a specific number of columns. -.. 
GENERATED FROM PYTHON SOURCE LINES 134-138 +.. GENERATED FROM PYTHON SOURCE LINES 131-135 The converter. One thing we need to pay attention to is the target opset. This information is important to make sure that every node is defined following the specifications of that opset. -.. GENERATED FROM PYTHON SOURCE LINES 138-154 +.. GENERATED FROM PYTHON SOURCE LINES 135-151 .. code-block:: default @@ -250,11 +243,11 @@ specifications of that opset. -.. GENERATED FROM PYTHON SOURCE LINES 155-156 +.. GENERATED FROM PYTHON SOURCE LINES 152-153 We need to let *skl2onnx* know about the new converter. -.. GENERATED FROM PYTHON SOURCE LINES 156-181 +.. GENERATED FROM PYTHON SOURCE LINES 153-178 .. code-block:: default @@ -289,20 +282,18 @@ We need to let *skl2onnx* know about the new converter. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (3.56012595403854e-07, 0.0003158352661955726) + (3.560125949597648e-07, 0.0003158352661960492) -.. GENERATED FROM PYTHON SOURCE LINES 182-183 +.. GENERATED FROM PYTHON SOURCE LINES 179-180 Let's check it works as well with double. -.. GENERATED FROM PYTHON SOURCE LINES 183-192 +.. GENERATED FROM PYTHON SOURCE LINES 180-189 .. code-block:: default @@ -321,8 +312,6 @@ Let's check it works as well with double. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none (0.0, 0.0) @@ -330,16 +319,16 @@ Let's check it works as well with double. -.. GENERATED FROM PYTHON SOURCE LINES 193-194 +.. GENERATED FROM PYTHON SOURCE LINES 190-191 The differences are smaller with double as expected. -.. GENERATED FROM PYTHON SOURCE LINES 197-199 +.. GENERATED FROM PYTHON SOURCE LINES 194-196 Final graph +++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 199-204 +.. GENERATED FROM PYTHON SOURCE LINES 196-201 .. code-block:: default @@ -363,35 +352,23 @@ Final graph .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.530 seconds) + **Total running time of the script:** ( 0 minutes 0.499 seconds) .. _sphx_glr_download_auto_tutorial_plot_kcustom_converter_wrapper.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_kcustom_converter_wrapper.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_kcustom_converter_wrapper.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_kcustom_converter_wrapper.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_kcustom_converter_wrapper.ipynb ` + :download:`Download Jupyter notebook: plot_kcustom_converter_wrapper.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_lcustom_options.rst.txt b/_sources/auto_tutorial/plot_lcustom_options.rst.txt index 8d017f563..d13149d4b 100644 --- a/_sources/auto_tutorial/plot_lcustom_options.rst.txt +++ b/_sources/auto_tutorial/plot_lcustom_options.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_lcustom_options.py" +.. 
"auto_tutorial/plot_lcustom_options.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -35,13 +35,10 @@ Example :ref:`l-plot-custom-converter` implements a converter which uses operator *MatMul*. Option *use_gemm* is used to replace *MatMul* by *Gemm*. -.. contents:: - :local: - Custom model ++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 28-90 +.. GENERATED FROM PYTHON SOURCE LINES 25-87 .. code-block:: default @@ -113,8 +110,6 @@ Custom model .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[ 0.0167562 0.52111756 -1.24946737 -0.56194325] @@ -126,14 +121,14 @@ Custom model -.. GENERATED FROM PYTHON SOURCE LINES 91-95 +.. GENERATED FROM PYTHON SOURCE LINES 88-92 Conversion into ONNX ++++++++++++++++++++ Let's try to convert it and see what happens. -.. GENERATED FROM PYTHON SOURCE LINES 95-130 +.. GENERATED FROM PYTHON SOURCE LINES 92-127 .. code-block:: default @@ -179,12 +174,12 @@ Let's try to convert it and see what happens. -.. GENERATED FROM PYTHON SOURCE LINES 131-133 +.. GENERATED FROM PYTHON SOURCE LINES 128-130 The registration needs to declare the options supported by the converted. -.. GENERATED FROM PYTHON SOURCE LINES 133-159 +.. GENERATED FROM PYTHON SOURCE LINES 130-156 .. code-block:: default @@ -220,21 +215,19 @@ supported by the converted. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none conversion: use_gemm= False - (6.046576181972796e-07, 0.0002951417065241126) + (6.04657619085458e-07, 0.0002951417065406967) -.. GENERATED FROM PYTHON SOURCE LINES 160-161 +.. GENERATED FROM PYTHON SOURCE LINES 157-158 We try the non default option, `use_gemm: True`. -.. GENERATED FROM PYTHON SOURCE LINES 161-172 +.. GENERATED FROM PYTHON SOURCE LINES 158-169 .. code-block:: default @@ -255,21 +248,19 @@ We try the non default option, `use_gemm: True`. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none conversion: use_gemm= True - (2.01757041429218e-06, 0.0005483764980302357) + (2.01757041717876e-06, 0.0005483764980468156) -.. GENERATED FROM PYTHON SOURCE LINES 173-174 +.. GENERATED FROM PYTHON SOURCE LINES 170-171 Visually. -.. GENERATED FROM PYTHON SOURCE LINES 174-182 +.. GENERATED FROM PYTHON SOURCE LINES 171-179 .. code-block:: default @@ -293,14 +284,14 @@ Visually. -.. GENERATED FROM PYTHON SOURCE LINES 183-187 +.. GENERATED FROM PYTHON SOURCE LINES 180-184 Time comparison +++++++++++++++ Let's compare the two computation. -.. GENERATED FROM PYTHON SOURCE LINES 187-207 +.. GENERATED FROM PYTHON SOURCE LINES 184-204 .. code-block:: default @@ -357,8 +348,8 @@ Let's compare the two computation. average - 0.00002 - 0.000012 + 0.000017 + 0.000011 deviation @@ -367,13 +358,13 @@ Let's compare the two computation. min_exec - 0.000019 - 0.000012 + 0.000015 + 0.00001 max_exec - 0.000057 - 0.000019 + 0.000041 + 0.000016 repeat @@ -400,35 +391,23 @@ Let's compare the two computation. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 2.376 seconds) + **Total running time of the script:** ( 0 minutes 1.917 seconds) .. _sphx_glr_download_auto_tutorial_plot_lcustom_options.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. 
image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_lcustom_options.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_lcustom_options.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_lcustom_options.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_lcustom_options.ipynb ` + :download:`Download Jupyter notebook: plot_lcustom_options.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_mcustom_parser.rst.txt b/_sources/auto_tutorial/plot_mcustom_parser.rst.txt index 4ee27e9eb..b17b6bf75 100644 --- a/_sources/auto_tutorial/plot_mcustom_parser.rst.txt +++ b/_sources/auto_tutorial/plot_mcustom_parser.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_mcustom_parser.py" +.. "auto_tutorial/plot_mcustom_parser.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -38,13 +38,10 @@ be a very efficient converter but that's just for the sake of using a parser. By default, a transformer only returns one output but both are needed. -.. contents:: - :local: - A new transformer +++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 30-90 +.. GENERATED FROM PYTHON SOURCE LINES 27-87 .. code-block:: default @@ -114,8 +111,6 @@ A new transformer .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[ 0.0167562 0.52111756 -1.24946737 -0.56194325] @@ -127,14 +122,14 @@ A new transformer -.. GENERATED FROM PYTHON SOURCE LINES 91-95 +.. GENERATED FROM PYTHON SOURCE LINES 88-92 Conversion into ONNX with two outputs +++++++++++++++++++++++++++++++++++++ Let's try to convert it and see what happens. -.. GENERATED FROM PYTHON SOURCE LINES 95-146 +.. GENERATED FROM PYTHON SOURCE LINES 92-143 .. code-block:: default @@ -196,11 +191,11 @@ Let's try to convert it and see what happens. -.. GENERATED FROM PYTHON SOURCE LINES 147-148 +.. GENERATED FROM PYTHON SOURCE LINES 144-145 The registration needs to declare the parser as well. -.. GENERATED FROM PYTHON SOURCE LINES 148-157 +.. GENERATED FROM PYTHON SOURCE LINES 145-154 .. code-block:: default @@ -220,11 +215,11 @@ The registration needs to declare the parser as well. -.. GENERATED FROM PYTHON SOURCE LINES 158-159 +.. GENERATED FROM PYTHON SOURCE LINES 155-156 And conversion. -.. GENERATED FROM PYTHON SOURCE LINES 159-182 +.. GENERATED FROM PYTHON SOURCE LINES 156-179 .. code-block:: default @@ -257,21 +252,19 @@ And conversion. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (6.046576181972796e-07, 0.0002951417065241126) - (2.01757041429218e-06, 0.0005483764980302357) + (6.04657619085458e-07, 0.0002951417065406967) + (2.01757041717876e-06, 0.0005483764980468156) -.. GENERATED FROM PYTHON SOURCE LINES 183-184 +.. GENERATED FROM PYTHON SOURCE LINES 180-181 It works. The final looks like the following. -.. 
GENERATED FROM PYTHON SOURCE LINES 184-188 +.. GENERATED FROM PYTHON SOURCE LINES 181-185 .. code-block:: default @@ -285,12 +278,10 @@ It works. The final looks like the following. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none OnnxInference(...) - def compiled_run(dict_inputs, yield_ops=None, context=None): + def compiled_run(dict_inputs, yield_ops=None, context=None, attributes=None): if yield_ops is not None: raise NotImplementedError('yields_ops should be None.') # init: Ge_Gemmcst1 (Ge_Gemmcst1) @@ -299,8 +290,8 @@ It works. The final looks like the following. # inputs X = dict_inputs['X'] (Su_C0, ) = n0_sub(X, Su_Subcst) - (gemm, ) = n1_gemm(X, Ma_MatMulcst, Ge_Gemmcst1) - (nogemm, ) = n2_matmul(Su_C0, Ma_MatMulcst) + (nogemm, ) = n1_matmul(Su_C0, Ma_MatMulcst) + (gemm, ) = n2_gemm(X, Ma_MatMulcst, Ge_Gemmcst1) return { 'nogemm': nogemm, 'gemm': gemm, @@ -309,12 +300,12 @@ It works. The final looks like the following. -.. GENERATED FROM PYTHON SOURCE LINES 189-191 +.. GENERATED FROM PYTHON SOURCE LINES 186-188 Final graph +++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 191-195 +.. GENERATED FROM PYTHON SOURCE LINES 188-192 .. code-block:: default @@ -337,35 +328,23 @@ Final graph .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.597 seconds) + **Total running time of the script:** ( 0 minutes 0.255 seconds) .. _sphx_glr_download_auto_tutorial_plot_mcustom_parser.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_mcustom_parser.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_mcustom_parser.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_mcustom_parser.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_mcustom_parser.ipynb ` + :download:`Download Jupyter notebook: plot_mcustom_parser.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_ngrams.rst.txt b/_sources/auto_tutorial/plot_ngrams.rst.txt new file mode 100644 index 000000000..b37d1cfd4 --- /dev/null +++ b/_sources/auto_tutorial/plot_ngrams.rst.txt @@ -0,0 +1,279 @@ + +.. DO NOT EDIT. +.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. +.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: +.. "auto_tutorial/plot_ngrams.py" +.. LINE NUMBERS ARE GIVEN BELOW. + +.. only:: html + + .. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here ` + to download the full example code + +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_auto_tutorial_plot_ngrams.py: + + +.. _example-ngrams: + +Tricky issue when converting CountVectorizer or TfidfVectorizer +=============================================================== + +This issue is described at `scikit-learn/issues/13733 +`_. +If a CountVectorizer or a TfidfVectorizer produces a token with a space, +skl2onnx cannot know if it a bi-grams or a unigram with a space. + +A simple example impossible to convert +++++++++++++++++++++++++++++++++++++++ + +.. GENERATED FROM PYTHON SOURCE LINES 17-40 + +.. 
code-block:: default + + + import pprint + import numpy + from numpy.testing import assert_almost_equal + from onnxruntime import InferenceSession + from sklearn.feature_extraction.text import TfidfVectorizer + from skl2onnx import to_onnx + from skl2onnx.sklapi import TraceableTfidfVectorizer + import skl2onnx.sklapi.register # noqa + + corpus = numpy.array([ + "This is the first document.", + "This document is the second document.", + "Is this the first document?", + "", + ]).reshape((4, )) + + pattern = r"\b[a-z ]{1,10}\b" + mod1 = TfidfVectorizer(ngram_range=(1, 2), + token_pattern=pattern) + mod1.fit(corpus) + + + + + + + +.. raw:: html + +
+    <pre>TfidfVectorizer(ngram_range=(1, 2), token_pattern='\\b[a-z ]{1,10}\\b')</pre>
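+
+A minimal, hypothetical sketch (not part of the generated script) of why such a
+vocabulary becomes ambiguous: with this ``token_pattern`` a single token may itself
+contain a space, so a vocabulary key such as ``"is the "`` could in principle come
+either from one token or from a bi-gram whose tokens were joined with a space.
+
+.. code-block:: python
+
+    # Hypothetical illustration only: an n-gram key is built by joining its
+    # tokens with a single space, which collides with a token that already
+    # contains a space.
+    token = "is the "            # one token matched by r"\b[a-z ]{1,10}\b"
+    bigram = ("is", "the ")      # a bi-gram made of two shorter tokens
+    assert " ".join(bigram) == token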
+ +.. GENERATED FROM PYTHON SOURCE LINES 41-43 + +Unigrams and bi-grams are placed into the following container +which maps it to its column index. + +.. GENERATED FROM PYTHON SOURCE LINES 43-47 + +.. code-block:: default + + + pprint.pprint(mod1.vocabulary_) + + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + {'document': 0, + 'document ': 1, + 'document is the ': 2, + 'is the ': 3, + 'is the second ': 4, + 'is this ': 5, + 'is this the first ': 6, + 'second ': 7, + 'second document': 8, + 'the first ': 9, + 'the first document': 10, + 'this ': 11, + 'this document ': 12, + 'this is ': 13, + 'this is the first ': 14} + + + + +.. GENERATED FROM PYTHON SOURCE LINES 48-49 + +Conversion. + +.. GENERATED FROM PYTHON SOURCE LINES 49-56 + +.. code-block:: default + + + try: + to_onnx(mod1, corpus) + except RuntimeError as e: + print(e) + + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + There were ambiguities between n-grams and tokens. 2 errors occurred. You can fix it by using class TraceableTfidfVectorizer. + You can learn more at https://github.com/scikit-learn/scikit-learn/issues/13733. + Unable to split n-grams 'is this the first ' into tokens ('is', 'this', 'the', 'first ') existing in the vocabulary. Token 'is' does not exist in the vocabulary.. + Unable to split n-grams 'this is the first ' into tokens ('this', 'is', 'the', 'first ') existing in the vocabulary. Token 'this' does not exist in the vocabulary.. + + + + +.. GENERATED FROM PYTHON SOURCE LINES 57-64 + +TraceableTfidfVectorizer +++++++++++++++++++++++++ + +Class :class:`TraceableTfidfVectorizer` is equivalent to +:class:`sklearn.feature_extraction.text.TfidfVectorizer` +but stores the unigrams and bi-grams of the vocabulary with tuple +instead of concatenating every piece into a string. + +.. GENERATED FROM PYTHON SOURCE LINES 64-72 + +.. code-block:: default + + + + mod2 = TraceableTfidfVectorizer( + ngram_range=(1, 2), token_pattern=pattern) + mod2.fit(corpus) + + pprint.pprint(mod2.vocabulary_) + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + {('document',): 0, + ('document ',): 1, + ('document ', 'is the '): 2, + ('is the ',): 3, + ('is the ', 'second '): 4, + ('is this ',): 5, + ('is this ', 'the first '): 6, + ('second ',): 7, + ('second ', 'document'): 8, + ('the first ',): 9, + ('the first ', 'document'): 10, + ('this ',): 11, + ('this ', 'document '): 12, + ('this is ',): 13, + ('this is ', 'the first '): 14} + + + + +.. GENERATED FROM PYTHON SOURCE LINES 73-74 + +Let's check it produces the same results. + +.. GENERATED FROM PYTHON SOURCE LINES 74-78 + +.. code-block:: default + + + assert_almost_equal(mod1.transform(corpus).todense(), + mod2.transform(corpus).todense()) + + + + + + + + +.. GENERATED FROM PYTHON SOURCE LINES 79-83 + +Conversion. Line `import skl2onnx.sklapi.register` +was added to register the converters associated to these +new class. By default, only converters for scikit-learn are +declared. + +.. GENERATED FROM PYTHON SOURCE LINES 83-88 + +.. code-block:: default + + + onx = to_onnx(mod2, corpus) + sess = InferenceSession(onx.SerializeToString()) + got = sess.run(None, {'X': corpus}) + + + + + + + + +.. GENERATED FROM PYTHON SOURCE LINES 89-90 + +Let's check if there are discrepancies... + +.. GENERATED FROM PYTHON SOURCE LINES 90-92 + +.. code-block:: default + + + assert_almost_equal(mod2.transform(corpus).todense(), got[0]) + + + + + + + + +.. 
rst-class:: sphx-glr-timing + + **Total running time of the script:** ( 0 minutes 0.047 seconds) + + +.. _sphx_glr_download_auto_tutorial_plot_ngrams.py: + +.. only:: html + + .. container:: sphx-glr-footer sphx-glr-footer-example + + + .. container:: sphx-glr-download sphx-glr-download-python + + :download:`Download Python source code: plot_ngrams.py ` + + .. container:: sphx-glr-download sphx-glr-download-jupyter + + :download:`Download Jupyter notebook: plot_ngrams.ipynb ` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/_sources/auto_tutorial/plot_pextend_python_runtime.rst.txt b/_sources/auto_tutorial/plot_pextend_python_runtime.rst.txt index e755b6639..f33964bf0 100644 --- a/_sources/auto_tutorial/plot_pextend_python_runtime.rst.txt +++ b/_sources/auto_tutorial/plot_pextend_python_runtime.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_pextend_python_runtime.py" +.. "auto_tutorial/plot_pextend_python_runtime.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -42,20 +42,17 @@ The example changes the transformer from decorrelates the variables by computing the eigen values. Method *fit* does not do anything anymore. -.. contents:: - :local: - A transformer which decorrelates variables ++++++++++++++++++++++++++++++++++++++++++ This time, the eigen values are not estimated at training time but at prediction time. -.. GENERATED FROM PYTHON SOURCE LINES 38-132 +.. GENERATED FROM PYTHON SOURCE LINES 35-129 .. code-block:: default - from mlprodict.onnxrt.shape_object import ShapeObject + from mlprodict.onnxrt.ops_cpu import OpRunCustom, register_operator from skl2onnx.algebra.onnx_ops import ( OnnxAdd, @@ -66,7 +63,7 @@ training time but at prediction time. OnnxMatMul, OnnxMul, OnnxPow, - OnnxReduceMean, + OnnxReduceMean_13, OnnxShape, OnnxSub, OnnxTranspose, @@ -156,7 +153,7 @@ training time but at prediction time. -.. GENERATED FROM PYTHON SOURCE LINES 133-161 +.. GENERATED FROM PYTHON SOURCE LINES 130-158 Everything works as expected. @@ -187,7 +184,7 @@ New domains are officially supported by :epkg:`onnx` package. We want to create a new operator `Eig` of domain `onnxcustom`. It must be declared in a class, then a converter can use it. -.. GENERATED FROM PYTHON SOURCE LINES 161-193 +.. GENERATED FROM PYTHON SOURCE LINES 158-190 .. code-block:: default @@ -229,8 +226,6 @@ It must be declared in a class, then a converter can use it. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none OnnxEig(1 in) -> ? @@ -238,7 +233,7 @@ It must be declared in a class, then a converter can use it. -.. GENERATED FROM PYTHON SOURCE LINES 194-201 +.. GENERATED FROM PYTHON SOURCE LINES 191-198 Now we can write the converter and the shape calculator. @@ -248,7 +243,7 @@ shape calculator Nothing new here. -.. GENERATED FROM PYTHON SOURCE LINES 201-211 +.. GENERATED FROM PYTHON SOURCE LINES 198-208 .. code-block:: default @@ -269,7 +264,7 @@ Nothing new here. -.. GENERATED FROM PYTHON SOURCE LINES 212-218 +.. GENERATED FROM PYTHON SOURCE LINES 209-215 converter ^^^^^^^^^ @@ -278,7 +273,7 @@ The converter is using the class `OnnxEig`. 
The code is longer than previous converters as the computation is more complex too. -.. GENERATED FROM PYTHON SOURCE LINES 218-309 +.. GENERATED FROM PYTHON SOURCE LINES 215-306 .. code-block:: default @@ -301,7 +296,7 @@ more complex too. # Lines in comment specify the numpy computation # the ONNX code implements. # mean_ = numpy.mean(X, axis=0, keepdims=True) - mean = OnnxReduceMean(X, axes=[0], keepdims=1, op_version=opv) + mean = OnnxReduceMean_13(X, axes=[0], keepdims=1, op_version=opv) # This is trick I often use. The converter automatically # chooses a name for every output. In big graph, @@ -380,7 +375,7 @@ more complex too. -.. GENERATED FROM PYTHON SOURCE LINES 310-322 +.. GENERATED FROM PYTHON SOURCE LINES 307-319 Runtime for Eig ^^^^^^^^^^^^^^^ @@ -395,7 +390,7 @@ empty shape if it depends on the inputs. If it is known, the runtime may be able to optimize the computation, by reducing allocation for example. -.. GENERATED FROM PYTHON SOURCE LINES 322-354 +.. GENERATED FROM PYTHON SOURCE LINES 319-338 .. code-block:: default @@ -417,19 +412,6 @@ by reducing allocation for example. return numpy.linalg.eig(x) return (numpy.linalg.eigvals(x), ) - def infer_shapes(self, x): - # shape inference, if you don't know what to - # write, just return `ShapeObject(None)` - if self.eigv: - return ( - ShapeObject( - x.shape, dtype=x.dtype, - name=self.__class__.__name__ + 'Values'), - ShapeObject( - x.shape, dtype=x.dtype, - name=self.__class__.__name__ + 'Vectors')) - return (ShapeObject(x.shape, dtype=x.dtype, - name=self.__class__.__name__), ) @@ -438,12 +420,12 @@ by reducing allocation for example. -.. GENERATED FROM PYTHON SOURCE LINES 355-357 +.. GENERATED FROM PYTHON SOURCE LINES 339-341 Registration ^^^^^^^^^^^^ -.. GENERATED FROM PYTHON SOURCE LINES 357-364 +.. GENERATED FROM PYTHON SOURCE LINES 341-348 .. code-block:: default @@ -461,12 +443,12 @@ Registration -.. GENERATED FROM PYTHON SOURCE LINES 365-367 +.. GENERATED FROM PYTHON SOURCE LINES 349-351 Final example +++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 367-394 +.. GENERATED FROM PYTHON SOURCE LINES 351-378 .. code-block:: default @@ -478,7 +460,7 @@ Final example dec = LiveDecorrelateTransformer() dec.fit(X) - onx = to_onnx(dec, X.astype(numpy.float32)) + onx = to_onnx(dec, X.astype(numpy.float32), target_opset=17) register_operator(OpEig, name='Eig', overwrite=False) @@ -503,8 +485,6 @@ Final example .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none (0.0, 0.0) @@ -512,16 +492,16 @@ Final example -.. GENERATED FROM PYTHON SOURCE LINES 395-396 +.. GENERATED FROM PYTHON SOURCE LINES 379-380 It works! -.. GENERATED FROM PYTHON SOURCE LINES 398-400 +.. GENERATED FROM PYTHON SOURCE LINES 382-384 Final graph +++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 400-405 +.. GENERATED FROM PYTHON SOURCE LINES 384-389 .. code-block:: default @@ -545,35 +525,23 @@ Final graph .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.441 seconds) + **Total running time of the script:** ( 0 minutes 0.229 seconds) .. _sphx_glr_download_auto_tutorial_plot_pextend_python_runtime.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_pextend_python_runtime.ipynb - :alt: Launch binder - :width: 150 px - - - .. 
container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_pextend_python_runtime.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_pextend_python_runtime.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_pextend_python_runtime.ipynb ` + :download:`Download Jupyter notebook: plot_pextend_python_runtime.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_qextend_onnxruntime.rst.txt b/_sources/auto_tutorial/plot_qextend_onnxruntime.rst.txt index 89cc7d045..f39b4c5e6 100644 --- a/_sources/auto_tutorial/plot_qextend_onnxruntime.rst.txt +++ b/_sources/auto_tutorial/plot_qextend_onnxruntime.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_qextend_onnxruntime.py" +.. "auto_tutorial/plot_qextend_onnxruntime.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -44,30 +44,18 @@ C++... .. _sphx_glr_download_auto_tutorial_plot_qextend_onnxruntime.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_qextend_onnxruntime.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_qextend_onnxruntime.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_qextend_onnxruntime.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_qextend_onnxruntime.ipynb ` + :download:`Download Jupyter notebook: plot_qextend_onnxruntime.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_transformer_discrepancy.rst.txt b/_sources/auto_tutorial/plot_transformer_discrepancy.rst.txt new file mode 100644 index 000000000..7a53c462b --- /dev/null +++ b/_sources/auto_tutorial/plot_transformer_discrepancy.rst.txt @@ -0,0 +1,273 @@ + +.. DO NOT EDIT. +.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. +.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: +.. "auto_tutorial/plot_transformer_discrepancy.py" +.. LINE NUMBERS ARE GIVEN BELOW. + +.. only:: html + + .. note:: + :class: sphx-glr-download-link-note + + Click :ref:`here ` + to download the full example code + +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_auto_tutorial_plot_transformer_discrepancy.py: + + +.. _example-transform-discrepancy: + +Dealing with discrepancies (tf-idf) +=================================== + +.. index:: td-idf + +`TfidfVectorizer `_ +is one transform for which the corresponding converted onnx model +may produce different results. The larger the vocabulary is, +the higher the probability to get different result is. 
+This example proposes a equivalent model with no discrepancies. + +Imports, setups ++++++++++++++++ + +All imports. It also registered onnx converters for :epgk:`xgboost` +and :epkg:`lightgbm`. + +.. GENERATED FROM PYTHON SOURCE LINES 22-58 + +.. code-block:: default + + import pprint + import numpy + from sklearn.pipeline import Pipeline + from sklearn.compose import ColumnTransformer + from sklearn.feature_extraction.text import TfidfVectorizer + from onnxruntime import InferenceSession + from skl2onnx import to_onnx + + + def print_sparse_matrix(m): + nonan = numpy.nan_to_num(m) + mi, ma = nonan.min(), nonan.max() + if mi == ma: + ma += 1 + mat = numpy.empty(m.shape, dtype=numpy.str_) + mat[:, :] = '.' + if hasattr(m, 'todense'): + dense = m.todense() + else: + dense = m + for i in range(m.shape[0]): + for j in range(m.shape[1]): + if dense[i, j] > 0: + c = int((dense[i, j] - mi) / (ma - mi) * 25) + mat[i, j] = chr(ord('A') + c) + return '\n'.join(''.join(line) for line in mat) + + + def diff(a, b): + if a.shape != b.shape: + raise ValueError( + f"Cannot compare matrices with different shapes " + f"{a.shape} != {b.shape}.") + d = numpy.abs(a - b).sum() / a.size + return d + + + + + + + + +.. GENERATED FROM PYTHON SOURCE LINES 59-63 + +Artificial datasets ++++++++++++++++++++ + +Iris + a text column. + +.. GENERATED FROM PYTHON SOURCE LINES 63-78 + +.. code-block:: default + + + + strings = numpy.array([ + "This a sentence.", + "This a sentence with more characters $^*&'(-...", + """var = ClassName(var2, user=mail@anywhere.com, pwd""" + """=")_~-('&]@^\\`|[{#")""", + "c79857654", + "https://complex-url.com/;76543u3456?g=hhh&h=23", + "01-03-05T11:12:13", + "https://complex-url.com/;dd76543u3456?g=ddhhh&h=23", + ]).reshape((-1, 1)) + + pprint.pprint(strings) + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + array([['This a sentence.'], + ["This a sentence with more characters $^*&'(-..."], + ['var = ClassName(var2, user=mail@anywhere.com, pwd=")_~-(\'&]@^\\`|[{#")'], + ['c79857654'], + ['https://complex-url.com/;76543u3456?g=hhh&h=23'], + ['01-03-05T11:12:13'], + ['https://complex-url.com/;dd76543u3456?g=ddhhh&h=23']], + dtype='` + + .. container:: sphx-glr-download sphx-glr-download-jupyter + + :download:`Download Jupyter notebook: plot_transformer_discrepancy.ipynb ` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/_sources/auto_tutorial/plot_usparse_xgboost.rst.txt b/_sources/auto_tutorial/plot_usparse_xgboost.rst.txt index 42bace8ed..45164cfc4 100644 --- a/_sources/auto_tutorial/plot_usparse_xgboost.rst.txt +++ b/_sources/auto_tutorial/plot_usparse_xgboost.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_usparse_xgboost.py" +.. "auto_tutorial/plot_usparse_xgboost.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -34,16 +34,13 @@ as they are not present in the datasets. Because some predictors do the difference, this ambiguity may introduces discrepencies when converter into ONNX. This example looks into several configurations. -.. contents:: - :local: - Imports, setups +++++++++++++++ All imports. 
It also registered onnx converters for :epgk:`xgboost` and *lightgbm*. -.. GENERATED FROM PYTHON SOURCE LINES 29-67 +.. GENERATED FROM PYTHON SOURCE LINES 26-64 .. code-block:: default @@ -92,14 +89,14 @@ and *lightgbm*. -.. GENERATED FROM PYTHON SOURCE LINES 68-72 +.. GENERATED FROM PYTHON SOURCE LINES 65-69 Artificial datasets +++++++++++++++++++++++++++ Iris + a text column. -.. GENERATED FROM PYTHON SOURCE LINES 72-89 +.. GENERATED FROM PYTHON SOURCE LINES 69-87 .. code-block:: default @@ -111,6 +108,7 @@ Iris + a text column. y = data.target df = pandas.DataFrame(X) + df.columns = [f"c{c}" for c in df.columns] df["text"] = [cst[i] for i in y] @@ -127,7 +125,7 @@ Iris + a text column. -.. GENERATED FROM PYTHON SOURCE LINES 90-96 +.. GENERATED FROM PYTHON SOURCE LINES 88-94 Train ensemble after sparse +++++++++++++++++++++++++++ @@ -136,7 +134,7 @@ The example use the Iris datasets with artifical text datasets preprocessed with a tf-idf. `sparse_threshold=1.` avoids sparse matrices to be converted into dense matrices. -.. GENERATED FROM PYTHON SOURCE LINES 96-193 +.. GENERATED FROM PYTHON SOURCE LINES 94-192 .. code-block:: default @@ -193,7 +191,8 @@ sparse matrices to be converted into dense matrices. try: pipe.fit(df_train, y_train) except TypeError as e: - obs = dict(model=model.__name__, pipe=pipe, error=e) + obs = dict(model=model.__name__, pipe=pipe, error=e, + model_onnx=None) pipes.append(obs) continue @@ -215,7 +214,7 @@ sparse matrices to be converted into dense matrices. f.write(model_onnx.SerializeToString()) sess = rt.InferenceSession(model_onnx.SerializeToString()) - inputs = {"input": df[[0, 1]].values.astype(numpy.float32), + inputs = {"input": df[["c0", "c1"]].values.astype(numpy.float32), "text": df[["text"]].values} pred_onx = sess.run(None, inputs) @@ -243,17 +242,16 @@ sparse matrices to be converted into dense matrices. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - 0%| | 0/4 [00:00 0 RandomForestClassifier - 0.000006 + 0.000004 NaN @@ -298,13 +296,13 @@ sparse matrices to be converted into dense matrices. 2 XGBClassifier - 22.189188 + 5.278442 NaN 3 LGBMClassifier - 0.000007 + 0.000009 NaN @@ -314,7 +312,7 @@ sparse matrices to be converted into dense matrices.

-.. GENERATED FROM PYTHON SOURCE LINES 194-200 +.. GENERATED FROM PYTHON SOURCE LINES 193-199 Sparse data hurts. @@ -323,7 +321,7 @@ Dense data Let's replace sparse data with dense by using `sparse_threshold=0.` -.. GENERATED FROM PYTHON SOURCE LINES 200-208 +.. GENERATED FROM PYTHON SOURCE LINES 199-207 .. code-block:: default @@ -341,12 +339,11 @@ Let's replace sparse data with dense by using `sparse_threshold=0.` .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - 0%| | 0/4 [00:00 0 RandomForestClassifier - 0.000004 + 0.000005 1 @@ -388,12 +385,12 @@ Let's replace sparse data with dense by using `sparse_threshold=0.` 2 XGBClassifier - 0.000004 + 0.000006 3 LGBMClassifier - 0.000007 + 0.000009 @@ -402,12 +399,12 @@ Let's replace sparse data with dense by using `sparse_threshold=0.`

-.. GENERATED FROM PYTHON SOURCE LINES 209-211 +.. GENERATED FROM PYTHON SOURCE LINES 208-210 This is much better. Let's compare how the preprocessing applies on the data. -.. GENERATED FROM PYTHON SOURCE LINES 211-218 +.. GENERATED FROM PYTHON SOURCE LINES 210-217 .. code-block:: default @@ -424,8 +421,6 @@ applies on the data. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none sparse @@ -445,7 +440,7 @@ applies on the data. -.. GENERATED FROM PYTHON SOURCE LINES 219-238 +.. GENERATED FROM PYTHON SOURCE LINES 218-237 This shows `RandomForestClassifier 0 RandomForestClassifier - 43.120465 + 35.336951 NaN @@ -540,13 +534,13 @@ replace null values by nan in the onnx graph. 2 XGBClassifier - 0.000004 + 0.000006 NaN 3 LGBMClassifier - 0.000007 + 0.000009 NaN @@ -556,7 +550,7 @@ replace null values by nan in the onnx graph.

-.. GENERATED FROM PYTHON SOURCE LINES 247-256 +.. GENERATED FROM PYTHON SOURCE LINES 246-255 Dense, 0 replaced by nan ++++++++++++++++++++++++ @@ -568,7 +562,7 @@ A new converter is added to the list of supported models. It is equivalent to the previous options except it is more explicit. -.. GENERATED FROM PYTHON SOURCE LINES 256-264 +.. GENERATED FROM PYTHON SOURCE LINES 255-263 .. code-block:: default @@ -586,17 +580,16 @@ more explicit. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - 0%| | 0/4 [00:00 0 RandomForestClassifier - 22.411109 + 34.057222 NaN @@ -641,13 +634,13 @@ more explicit. 2 XGBClassifier - 0.000004 + 0.000006 NaN 3 LGBMClassifier - 0.000007 + 0.000009 NaN @@ -657,7 +650,7 @@ more explicit.

-.. GENERATED FROM PYTHON SOURCE LINES 265-271 +.. GENERATED FROM PYTHON SOURCE LINES 264-270 Conclusion ++++++++++ @@ -669,35 +662,23 @@ tuned depending on the model which follows the TfIdf preprocessing. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 0.950 seconds) + **Total running time of the script:** ( 0 minutes 0.807 seconds) .. _sphx_glr_download_auto_tutorial_plot_usparse_xgboost.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_usparse_xgboost.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_usparse_xgboost.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_usparse_xgboost.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_usparse_xgboost.ipynb ` + :download:`Download Jupyter notebook: plot_usparse_xgboost.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_wext_pyod_forest.rst.txt b/_sources/auto_tutorial/plot_wext_pyod_forest.rst.txt index 59c5d6bf4..122289427 100644 --- a/_sources/auto_tutorial/plot_wext_pyod_forest.rst.txt +++ b/_sources/auto_tutorial/plot_wext_pyod_forest.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_wext_pyod_forest.py" +.. "auto_tutorial/plot_wext_pyod_forest.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -32,16 +32,13 @@ It implements a custom converter for model `pyod.models.iforest.IForest pyod.models.html#module-pyod.models.iforest>`_. This example uses :ref:`l-plot-custom-converter` as a start. -.. contents:: - :local: - Trains a model ++++++++++++++ All imports. It also registered onnx converters for :epgk:`xgboost` and *lightgbm*. -.. GENERATED FROM PYTHON SOURCE LINES 27-63 +.. GENERATED FROM PYTHON SOURCE LINES 24-60 .. code-block:: default @@ -64,7 +61,7 @@ and *lightgbm*. IForest = None if IForest is not None: - data1 = {'First': [500, 500, 400, 100, 200, 300, 100], + data1 = {'First': [500, 500, 400, 100, 200, 300, 100], 'Second': ['a', 'b', 'a', 'b', 'a', 'b', 'c']} df1 = pd.DataFrame(data1, columns=['First', 'Second']) @@ -87,20 +84,20 @@ and *lightgbm*. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - Unable to import pyod: No module named 'pyod' + [Parallel(n_jobs=8)]: Using backend ThreadingBackend with 8 concurrent workers. + [Parallel(n_jobs=8)]: Done 2 out of 8 | elapsed: 0.0s remaining: 0.0s + [Parallel(n_jobs=8)]: Done 8 out of 8 | elapsed: 0.0s finished -.. GENERATED FROM PYTHON SOURCE LINES 64-65 +.. GENERATED FROM PYTHON SOURCE LINES 61-62 We check that the conversion fails as expected. -.. GENERATED FROM PYTHON SOURCE LINES 65-73 +.. GENERATED FROM PYTHON SOURCE LINES 62-70 .. 
code-block:: default @@ -116,10 +113,28 @@ We check that the conversion fails as expected. +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + Unable to find a shape calculator for type ''. + It usually means the pipeline being converted contains a + transformer or a predictor with no corresponding converter + implemented in sklearn-onnx. If the converted is implemented + in another library, you need to register + the converted so that it can be used by sklearn-onnx (function + update_registered_converter). If the model is not yet covered + by sklearn-onnx, you may raise an issue to + https://github.com/onnx/sklearn-onnx/issues + to get the converter implemented or even contribute to the + project. If the model is a custom model, a new converter must + be implemented. Examples can be found in the gallery. + + -.. GENERATED FROM PYTHON SOURCE LINES 74-80 +.. GENERATED FROM PYTHON SOURCE LINES 71-77 Custom converter ++++++++++++++++ @@ -128,7 +143,7 @@ First the parser and the shape calculator. The parser defines the number of outputs and their type. The shape calculator defines their dimensions. -.. GENERATED FROM PYTHON SOURCE LINES 80-104 +.. GENERATED FROM PYTHON SOURCE LINES 77-101 .. code-block:: default @@ -163,11 +178,11 @@ The shape calculator defines their dimensions. -.. GENERATED FROM PYTHON SOURCE LINES 105-106 +.. GENERATED FROM PYTHON SOURCE LINES 102-103 Then the converter. -.. GENERATED FROM PYTHON SOURCE LINES 106-159 +.. GENERATED FROM PYTHON SOURCE LINES 103-156 .. code-block:: default @@ -231,11 +246,11 @@ Then the converter. -.. GENERATED FROM PYTHON SOURCE LINES 160-161 +.. GENERATED FROM PYTHON SOURCE LINES 157-158 Finally the registration. -.. GENERATED FROM PYTHON SOURCE LINES 161-170 +.. GENERATED FROM PYTHON SOURCE LINES 158-167 .. code-block:: default @@ -255,11 +270,11 @@ Finally the registration. -.. GENERATED FROM PYTHON SOURCE LINES 171-172 +.. GENERATED FROM PYTHON SOURCE LINES 168-169 And the conversion. -.. GENERATED FROM PYTHON SOURCE LINES 172-177 +.. GENERATED FROM PYTHON SOURCE LINES 169-174 .. code-block:: default @@ -272,15 +287,22 @@ And the conversion. +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + [0.75171798] + [13.95064645] -.. GENERATED FROM PYTHON SOURCE LINES 178-180 + +.. GENERATED FROM PYTHON SOURCE LINES 175-177 Checking discrepencies ++++++++++++++++++++++ -.. GENERATED FROM PYTHON SOURCE LINES 180-200 +.. GENERATED FROM PYTHON SOURCE LINES 177-197 .. code-block:: default @@ -308,41 +330,49 @@ Checking discrepencies +.. rst-class:: sphx-glr-script-out + .. code-block:: none + dicrepencies: 0 8.684300415451318e-07 + ONNX labels [[0] + [0] + [0] + [0] + [0] + [0] + [1]] + ONNX probabilities [[1. 0. ] + [0.809063 0.19093698] + [1. 0. ] + [0.41380423 0.58619577] + [0.61369824 0.38630173] + [0.809063 0.19093698] + [0. 1. ]] -.. rst-class:: sphx-glr-timing - - **Total running time of the script:** ( 0 minutes 0.006 seconds) - - -.. _sphx_glr_download_auto_tutorial_plot_wext_pyod_forest.py: -.. only :: html - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example +.. rst-class:: sphx-glr-timing + **Total running time of the script:** ( 0 minutes 0.502 seconds) - .. container:: binder-badge - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_wext_pyod_forest.ipynb - :alt: Launch binder - :width: 150 px +.. _sphx_glr_download_auto_tutorial_plot_wext_pyod_forest.py: +.. only:: html - .. 
container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_wext_pyod_forest.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_wext_pyod_forest.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_wext_pyod_forest.ipynb ` + :download:`Download Jupyter notebook: plot_wext_pyod_forest.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/plot_woe_transformer.rst.txt b/_sources/auto_tutorial/plot_woe_transformer.rst.txt index 3a055c9fb..1f7f782d0 100644 --- a/_sources/auto_tutorial/plot_woe_transformer.rst.txt +++ b/_sources/auto_tutorial/plot_woe_transformer.rst.txt @@ -2,7 +2,7 @@ .. DO NOT EDIT. .. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. .. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: -.. "auto_tutorial\plot_woe_transformer.py" +.. "auto_tutorial/plot_woe_transformer.py" .. LINE NUMBERS ARE GIVEN BELOW. .. only:: html @@ -11,7 +11,7 @@ :class: sphx-glr-download-link-note Click :ref:`here ` - to download the full example code or to run this example in your browser via Binder + to download the full example code .. rst-class:: sphx-glr-example-title @@ -29,9 +29,6 @@ The results is the label of every intervals containing the feature. .. index:: WOE, WOETransformer -.. contents:: - :local: - A simple example ++++++++++++++++ @@ -42,7 +39,7 @@ checks that every of them belongs to two intervals, (left-right-closed). The first interval is associated to weight 55 and and the second one to 107. -.. GENERATED FROM PYTHON SOURCE LINES 28-52 +.. GENERATED FROM PYTHON SOURCE LINES 25-49 .. code-block:: default @@ -158,7 +155,7 @@ to weight 55 and and the second one to 107.

-.. GENERATED FROM PYTHON SOURCE LINES 53-58 +.. GENERATED FROM PYTHON SOURCE LINES 50-55 One Hot +++++++ @@ -166,7 +163,7 @@ One Hot The transformer outputs one column with the weights. But it could return one column per interval. -.. GENERATED FROM PYTHON SOURCE LINES 58-67 +.. GENERATED FROM PYTHON SOURCE LINES 55-64 .. code-block:: default @@ -278,12 +275,12 @@ But it could return one column per interval.

-.. GENERATED FROM PYTHON SOURCE LINES 68-70 +.. GENERATED FROM PYTHON SOURCE LINES 65-67 In that case, weights can be omitted. The output is binary. -.. GENERATED FROM PYTHON SOURCE LINES 70-79 +.. GENERATED FROM PYTHON SOURCE LINES 67-76 .. code-block:: default @@ -395,7 +392,7 @@ The output is binary.

-.. GENERATED FROM PYTHON SOURCE LINES 80-86 +.. GENERATED FROM PYTHON SOURCE LINES 77-83 Conversion to ONNX ++++++++++++++++++ @@ -404,7 +401,7 @@ Conversion to ONNX onehot=False -.. GENERATED FROM PYTHON SOURCE LINES 86-90 +.. GENERATED FROM PYTHON SOURCE LINES 83-87 .. code-block:: default @@ -418,8 +415,6 @@ onehot=False .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[ 0.] @@ -436,11 +431,11 @@ onehot=False -.. GENERATED FROM PYTHON SOURCE LINES 91-92 +.. GENERATED FROM PYTHON SOURCE LINES 88-89 onehot=True -.. GENERATED FROM PYTHON SOURCE LINES 92-97 +.. GENERATED FROM PYTHON SOURCE LINES 89-94 .. code-block:: default @@ -455,8 +450,6 @@ onehot=True .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[ 0. 0.] @@ -473,14 +466,14 @@ onehot=True -.. GENERATED FROM PYTHON SOURCE LINES 98-102 +.. GENERATED FROM PYTHON SOURCE LINES 95-99 ONNX Graphs +++++++++++ onehot=False -.. GENERATED FROM PYTHON SOURCE LINES 102-116 +.. GENERATED FROM PYTHON SOURCE LINES 99-113 .. code-block:: default @@ -509,20 +502,18 @@ onehot=False .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 2129.5, 3321.5, -0.5) + (-0.5, 2674.5, 3321.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 117-118 +.. GENERATED FROM PYTHON SOURCE LINES 114-115 onehot=True -.. GENERATED FROM PYTHON SOURCE LINES 118-132 +.. GENERATED FROM PYTHON SOURCE LINES 115-129 .. code-block:: default @@ -551,16 +542,14 @@ onehot=True .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (-0.5, 2272.5, 5696.5, -0.5) + (-0.5, 2743.5, 5696.5, -0.5) -.. GENERATED FROM PYTHON SOURCE LINES 133-138 +.. GENERATED FROM PYTHON SOURCE LINES 130-135 Half-line +++++++++ @@ -568,7 +557,7 @@ Half-line An interval may have only one extremity defined and the other can be infinite. -.. GENERATED FROM PYTHON SOURCE LINES 138-150 +.. GENERATED FROM PYTHON SOURCE LINES 135-147 .. code-block:: default @@ -672,11 +661,11 @@ can be infinite.

-.. GENERATED FROM PYTHON SOURCE LINES 151-152 +.. GENERATED FROM PYTHON SOURCE LINES 148-149 And the conversion to ONNX using the same instruction. -.. GENERATED FROM PYTHON SOURCE LINES 152-156 +.. GENERATED FROM PYTHON SOURCE LINES 149-153 .. code-block:: default @@ -690,8 +679,6 @@ And the conversion to ONNX using the same instruction. .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [[ 55.] @@ -711,35 +698,23 @@ And the conversion to ONNX using the same instruction. .. rst-class:: sphx-glr-timing - **Total running time of the script:** ( 0 minutes 2.833 seconds) + **Total running time of the script:** ( 0 minutes 3.361 seconds) .. _sphx_glr_download_auto_tutorial_plot_woe_transformer.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - .. container:: binder-badge - - .. image:: images/binder_badge_logo.svg - :target: https://mybinder.org/v2/gh/onnx/onnx.ai/sklearn-onnx//master?filepath=auto_examples/auto_tutorial/plot_woe_transformer.ipynb - :alt: Launch binder - :width: 150 px - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: plot_woe_transformer.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: plot_woe_transformer.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: plot_woe_transformer.ipynb ` + :download:`Download Jupyter notebook: plot_woe_transformer.ipynb ` .. only:: html diff --git a/_sources/auto_tutorial/sg_execution_times.rst.txt b/_sources/auto_tutorial/sg_execution_times.rst.txt index 188329628..b7c6d7b59 100644 --- a/_sources/auto_tutorial/sg_execution_times.rst.txt +++ b/_sources/auto_tutorial/sg_execution_times.rst.txt @@ -5,58 +5,64 @@ Computation times ================= -**01:31.345** total execution time for **auto_tutorial** files: +**01:55.854** total execution time for **auto_tutorial** files: +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_gexternal_lightgbm_reg.py` (``plot_gexternal_lightgbm_reg.py``) | 00:31.683 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_bbegin_measure_time.py` (``plot_bbegin_measure_time.py``) | 00:55.146 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_bbegin_measure_time.py` (``plot_bbegin_measure_time.py``) | 00:31.317 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_gexternal_lightgbm_reg.py` (``plot_gexternal_lightgbm_reg.py``) | 00:39.867 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_cbegin_opset.py` (``plot_cbegin_opset.py``) | 00:05.264 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_woe_transformer.py` (``plot_woe_transformer.py``) | 00:03.361 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_gbegin_transfer_learning.py` (``plot_gbegin_transfer_learning.py``) | 00:05.219 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_gbegin_transfer_learning.py` 
(``plot_gbegin_transfer_learning.py``) | 00:02.727 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_woe_transformer.py` (``plot_woe_transformer.py``) | 00:02.833 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_lcustom_options.py` (``plot_lcustom_options.py``) | 00:01.917 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_lcustom_options.py` (``plot_lcustom_options.py``) | 00:02.376 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_cbegin_opset.py` (``plot_cbegin_opset.py``) | 00:01.345 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_dbegin_options.py` (``plot_dbegin_options.py``) | 00:01.810 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_dbegin_options.py` (``plot_dbegin_options.py``) | 00:01.322 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_dbegin_options_list.py` (``plot_dbegin_options_list.py``) | 00:01.509 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_gexternal_catboost.py` (``plot_gexternal_catboost.py``) | 00:01.134 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_gbegin_dataframe.py` (``plot_gbegin_dataframe.py``) | 00:01.416 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_gbegin_dataframe.py` (``plot_gbegin_dataframe.py``) | 00:01.130 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_abegin_convert_pipeline.py` (``plot_abegin_convert_pipeline.py``) | 00:01.391 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_dbegin_options_list.py` (``plot_dbegin_options_list.py``) | 00:01.110 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_usparse_xgboost.py` (``plot_usparse_xgboost.py``) | 00:00.950 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_abegin_convert_pipeline.py` (``plot_abegin_convert_pipeline.py``) | 00:01.079 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_ebegin_float_double.py` (``plot_ebegin_float_double.py``) | 00:00.825 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_ebegin_float_double.py` (``plot_ebegin_float_double.py``) | 00:00.838 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_mcustom_parser.py` (``plot_mcustom_parser.py``) | 00:00.597 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_usparse_xgboost.py` (``plot_usparse_xgboost.py``) | 00:00.807 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_gexternal_xgboost.py` (``plot_gexternal_xgboost.py``) | 00:00.586 | 0.0 MB | +| 
:ref:`sphx_glr_auto_tutorial_plot_gexternal_xgboost.py` (``plot_gexternal_xgboost.py``) | 00:00.570 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_fbegin_investigate.py` (``plot_fbegin_investigate.py``) | 00:00.573 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_gexternal_lightgbm.py` (``plot_gexternal_lightgbm.py``) | 00:00.542 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_kcustom_converter_wrapper.py` (``plot_kcustom_converter_wrapper.py``) | 00:00.530 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_fbegin_investigate.py` (``plot_fbegin_investigate.py``) | 00:00.505 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_gexternal_lightgbm.py` (``plot_gexternal_lightgbm.py``) | 00:00.510 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_wext_pyod_forest.py` (``plot_wext_pyod_forest.py``) | 00:00.502 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_catwoe_transformer.py` (``plot_catwoe_transformer.py``) | 00:00.459 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_kcustom_converter_wrapper.py` (``plot_kcustom_converter_wrapper.py``) | 00:00.499 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_pextend_python_runtime.py` (``plot_pextend_python_runtime.py``) | 00:00.441 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_icustom_converter.py` (``plot_icustom_converter.py``) | 00:00.321 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_icustom_converter.py` (``plot_icustom_converter.py``) | 00:00.406 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_dbegin_options_zipmap.py` (``plot_dbegin_options_zipmap.py``) | 00:00.267 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_dbegin_options_zipmap.py` (``plot_dbegin_options_zipmap.py``) | 00:00.382 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_mcustom_parser.py` (``plot_mcustom_parser.py``) | 00:00.255 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_gconverting.py` (``plot_gconverting.py``) | 00:00.103 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_pextend_python_runtime.py` (``plot_pextend_python_runtime.py``) | 00:00.229 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_gbegin_cst.py` (``plot_gbegin_cst.py``) | 00:00.081 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_catwoe_transformer.py` (``plot_catwoe_transformer.py``) | 00:00.173 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_jcustom_syntax.py` 
(``plot_jcustom_syntax.py``) | 00:00.078 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_gconverting.py` (``plot_gconverting.py``) | 00:00.063 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_tutorial_plot_wext_pyod_forest.py` (``plot_wext_pyod_forest.py``) | 00:00.006 | 0.0 MB | +| :ref:`sphx_glr_auto_tutorial_plot_ngrams.py` (``plot_ngrams.py``) | 00:00.047 | 0.0 MB | ++---------------------------------------------------------------------------------------------------------+-----------+--------+ +| :ref:`sphx_glr_auto_tutorial_plot_gbegin_cst.py` (``plot_gbegin_cst.py``) | 00:00.040 | 0.0 MB | ++---------------------------------------------------------------------------------------------------------+-----------+--------+ +| :ref:`sphx_glr_auto_tutorial_plot_transformer_discrepancy.py` (``plot_transformer_discrepancy.py``) | 00:00.030 | 0.0 MB | ++---------------------------------------------------------------------------------------------------------+-----------+--------+ +| :ref:`sphx_glr_auto_tutorial_plot_jcustom_syntax.py` (``plot_jcustom_syntax.py``) | 00:00.028 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ | :ref:`sphx_glr_auto_tutorial_plot_qextend_onnxruntime.py` (``plot_qextend_onnxruntime.py``) | 00:00.000 | 0.0 MB | +---------------------------------------------------------------------------------------------------------+-----------+--------+ diff --git a/_sources/index.rst.txt b/_sources/index.rst.txt index e397cf31f..02ba5ade9 100644 --- a/_sources/index.rst.txt +++ b/_sources/index.rst.txt @@ -16,7 +16,7 @@ sklearn-onnx: Convert your scikit-learn model into ONNX *sklearn-onnx* enables you to convert models from -`sklearn-learn `_ +`scikit-learn `_ toolkits into `ONNX `_. .. toctree:: @@ -92,7 +92,7 @@ Every converter is tested with this backend. # Compute the prediction with ONNX Runtime import onnxruntime as rt import numpy - sess = rt.InferenceSession("rf_iris.onnx") + sess = rt.InferenceSession("rf_iris.onnx", providers=["CPUExecutionProvider"]) input_name = sess.get_inputs()[0].name label_name = sess.get_outputs()[0].name pred_onx = sess.run([label_name], {input_name: X_test.astype(numpy.float32)})[0] diff --git a/_sources/introduction.rst.txt b/_sources/introduction.rst.txt index 67c59e808..a64c96d91 100644 --- a/_sources/introduction.rst.txt +++ b/_sources/introduction.rst.txt @@ -5,9 +5,6 @@ Introduction ============ -.. contents:: - :local: - Quick start =========== @@ -75,7 +72,7 @@ for this machine learning model. :: import onnxruntime as rt - sess = rt.InferenceSession("logreg_iris.onnx") + sess = rt.InferenceSession("logreg_iris.onnx", providers=["CPUExecutionProvider"]) input_name = sess.get_inputs()[0].name label_name = sess.get_outputs()[0].name diff --git a/_sources/parameterized.rst.txt b/_sources/parameterized.rst.txt index 800b33c2d..6bf236bce 100644 --- a/_sources/parameterized.rst.txt +++ b/_sources/parameterized.rst.txt @@ -16,9 +16,6 @@ or :func:`to_onnx `. Every option ends up creating a different ONNX graph. Below is the list of models which enable this mechanism. -.. 
contents:: - :local: - GaussianProcessRegressor, NearestNeighbors ========================================== diff --git a/_sources/pipeline.rst.txt b/_sources/pipeline.rst.txt index 9f571342a..37dd9d8e3 100644 --- a/_sources/pipeline.rst.txt +++ b/_sources/pipeline.rst.txt @@ -5,9 +5,6 @@ Convert a pipeline ================== -.. contents:: - :local: - *skl2onnx* converts any machine learning pipeline into *ONNX* pipelines. Every transformer or predictors is converted into one or multiple nodes into the *ONNX* graph. @@ -269,7 +266,8 @@ a pipeline and each of its components independently. onnx_step = op['onnx_step'] # Use onnxruntime to compute ONNX outputs - sess = onnxruntime.InferenceSession(onnx_step.SerializeToString()) + sess = onnxruntime.InferenceSession(onnx_step.SerializeToString(), + providers=["CPUExecutionProvider"]) # Let's use the initial data as the ONNX model # contains all nodes from the first inputs to this node. @@ -354,7 +352,8 @@ them. # If it does not fail, let's compare the ONNX outputs with # the original operator. - sess = onnxruntime.InferenceSession(onnx_step.SerializeToString()) + sess = onnxruntime.InferenceSession(onnx_step.SerializeToString(), + providers=["CPUExecutionProvider"]) onnx_outputs = sess.run(None, {'input': data_in}) onnx_output = onnx_outputs[0] skl_outputs = op._debug.outputs['transform'] diff --git a/_sources/supported.rst.txt b/_sources/supported.rst.txt index 438290e7e..bda1942cb 100644 --- a/_sources/supported.rst.txt +++ b/_sources/supported.rst.txt @@ -14,9 +14,6 @@ such as :class:`OnnxSklearnPipeline` does. They wrap existing which inherits from :class:`OnnxOperatorMixin` which implements *to_onnx* methods. -.. contents:: - :local: - .. _l-converter-list: Covered Converters diff --git a/_sources/tutorial_1-5_external.rst.txt b/_sources/tutorial_1-5_external.rst.txt index 9032ded7f..a224b72a0 100644 --- a/_sources/tutorial_1-5_external.rst.txt +++ b/_sources/tutorial_1-5_external.rst.txt @@ -16,3 +16,4 @@ model are part of a pipeline. auto_tutorial/plot_gexternal_lightgbm auto_tutorial/plot_gexternal_lightgbm_reg auto_tutorial/plot_gexternal_xgboost + auto_tutorial/plot_gexternal_catboost diff --git a/_sources/tutorial_4_advanced.rst.txt b/_sources/tutorial_4_advanced.rst.txt index ad56f8d20..cd792de6e 100644 --- a/_sources/tutorial_4_advanced.rst.txt +++ b/_sources/tutorial_4_advanced.rst.txt @@ -10,5 +10,6 @@ with issues and resolved issues. .. toctree:: :maxdepth: 1 + auto_tutorial/plot_ngrams auto_tutorial/plot_usparse_xgboost auto_tutorial/plot_woe_transformer diff --git a/_static/_sphinx_javascript_frameworks_compat.js b/_static/_sphinx_javascript_frameworks_compat.js deleted file mode 100644 index 8549469dc..000000000 --- a/_static/_sphinx_javascript_frameworks_compat.js +++ /dev/null @@ -1,134 +0,0 @@ -/* - * _sphinx_javascript_frameworks_compat.js - * ~~~~~~~~~~ - * - * Compatability shim for jQuery and underscores.js. 
- * - * WILL BE REMOVED IN Sphinx 6.0 - * xref RemovedInSphinx60Warning - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - - -/** - * small helper function to urldecode strings - * - * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL - */ -jQuery.urldecode = function(x) { - if (!x) { - return x - } - return decodeURIComponent(x.replace(/\+/g, ' ')); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - var bbox = node.parentElement.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? 
rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} diff --git a/_static/basic.css b/_static/basic.css index 617781817..7577acb1a 100644 --- a/_static/basic.css +++ b/_static/basic.css @@ -1,928 +1,903 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -div.section::after { - display: block; - content: ''; - clear: left; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 270px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li p.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; 
- margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 360px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} -a.brackets:before, -span.brackets > a:before{ - content: "["; -} - -a.brackets:after, -span.brackets > a:after { - content: "]"; -} - - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, figure.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, figure.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, figure.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -img.align-default, figure.align-default, .figure.align-default { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-default { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar, -aside.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px; - background-color: #ffe; - width: 40%; - float: right; - clear: right; - overflow-x: auto; -} - -p.sidebar-title { - font-weight: bold; -} -div.admonition, div.topic, blockquote { - clear: left; -} - -/* -- topics ---------------------------------------------------------------- */ -div.topic { - border: 1px solid #ccc; - padding: 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- content of sidebars/topics/admonitions -------------------------------- */ - -div.sidebar > :last-child, -aside.sidebar > :last-child, -div.topic > :last-child, -div.admonition > :last-child { - margin-bottom: 0; -} - -div.sidebar::after, -aside.sidebar::after, -div.topic::after, -div.admonition::after, -blockquote::after { - display: block; - content: ''; - clear: both; -} - -/* 
-- tables ---------------------------------------------------------------- */ - -table.docutils { - margin-top: 10px; - margin-bottom: 10px; - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table.align-default { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -th > :first-child, -td > :first-child { - margin-top: 0px; -} - -th > :last-child, -td > :last-child { - margin-bottom: 0px; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure, figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption, figcaption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number, -figcaption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text, -figcaption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist { - margin: 1em 0; -} - -table.hlist td { - vertical-align: top; -} - -/* -- object description styles --------------------------------------------- */ - -.sig { - font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; -} - -.sig-name, code.descname { - background-color: transparent; - font-weight: bold; -} - -.sig-name { - font-size: 1.1em; -} - -code.descname { - font-size: 1.2em; -} - -.sig-prename, code.descclassname { - background-color: transparent; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.sig-param.n { - font-style: italic; -} - -/* C++ specific styling */ - -.sig-inline.c-texpr, -.sig-inline.cpp-texpr { - font-family: unset; -} - -.sig.c .k, .sig.c .kt, -.sig.cpp .k, .sig.cpp .kt { - color: #0033B3; -} - -.sig.c .m, -.sig.cpp .m { - color: #1750EB; -} - -.sig.c .s, .sig.c .sc, -.sig.cpp .s, .sig.cpp .sc { - color: #067D17; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -:not(li) > ol > li:first-child > :first-child, -:not(li) > ul > li:first-child > :first-child { - margin-top: 0px; -} - -:not(li) > ol > li:last-child > :last-child, -:not(li) > ul > li:last-child > :last-child { - margin-bottom: 0px; -} - -ol.simple ol p, -ol.simple ul p, -ul.simple ol p, -ul.simple ul p { - margin-top: 0; -} - -ol.simple > li:not(:first-child) > p, -ul.simple > li:not(:first-child) > p { - margin-top: 0; -} - -ol.simple p, -ul.simple p { - margin-bottom: 0; -} - -/* Docutils 0.17 and older (footnotes & citations) */ -dl.footnote 
> dt, -dl.citation > dt { - float: left; - margin-right: 0.5em; -} - -dl.footnote > dd, -dl.citation > dd { - margin-bottom: 0em; -} - -dl.footnote > dd:after, -dl.citation > dd:after { - content: ""; - clear: both; -} - -/* Docutils 0.18+ (footnotes & citations) */ -aside.footnote > span, -div.citation > span { - float: left; -} -aside.footnote > span:last-of-type, -div.citation > span:last-of-type { - padding-right: 0.5em; -} -aside.footnote > p { - margin-left: 2em; -} -div.citation > p { - margin-left: 4em; -} -aside.footnote > p:last-of-type, -div.citation > p:last-of-type { - margin-bottom: 0em; -} -aside.footnote > p:last-of-type:after, -div.citation > p:last-of-type:after { - content: ""; - clear: both; -} - -/* Footnotes & citations ends */ - -dl.field-list { - display: grid; - grid-template-columns: fit-content(30%) auto; -} - -dl.field-list > dt { - font-weight: bold; - word-break: break-word; - padding-left: 0.5em; - padding-right: 5px; -} - -dl.field-list > dt:after { - content: ":"; -} - -dl.field-list > dd { - padding-left: 0.5em; - margin-top: 0em; - margin-left: 0em; - margin-bottom: 0em; -} - -dl { - margin-bottom: 15px; -} - -dd > :first-child { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dl > dd:last-child, -dl > dd:last-child > :last-child { - margin-bottom: 0; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -.classifier:before { - font-style: normal; - margin: 0 0.5em; - content: ":"; - display: inline-block; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -pre, div[class*="highlight-"] { - clear: both; -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; - white-space: nowrap; -} - -div[class*="highlight-"] { - margin: 1em 0; -} - -td.linenos pre { - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - display: block; -} - -table.highlighttable tbody { - display: block; -} - -table.highlighttable tr { - display: flex; -} - -table.highlighttable td { - margin: 0; - padding: 0; -} - -table.highlighttable td.linenos { - padding-right: 0.5em; -} - -table.highlighttable td.code { - flex: 1; - overflow: hidden; -} - -.highlight .hll { - display: block; -} - -div.highlight pre, -table.highlighttable pre { - margin: 0; -} - -div.code-block-caption + div { - margin-top: 0; -} - -div.code-block-caption { - margin-top: 1em; - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -table.highlighttable td.linenos, -span.linenos, -div.highlight span.gp { /* gp: Generic.Prompt */ - user-select: none; - 
-webkit-user-select: text; /* Safari fallback only */ - -webkit-user-select: none; /* Chrome/Safari */ - -moz-user-select: none; /* Firefox */ - -ms-user-select: none; /* IE10+ */ -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - margin: 1em 0; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: absolute; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li 
p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + 
+nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp 
.k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + 
+table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } } \ No newline at end of file diff --git a/_static/debug.css b/_static/debug.css new file mode 100644 index 000000000..74d4aec33 --- /dev/null +++ b/_static/debug.css @@ -0,0 +1,69 @@ +/* + This CSS file should be overridden by the theme authors. It's + meant for debugging and developing the skeleton that this theme provides. 
+*/ +body { + font-family: -apple-system, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, + "Apple Color Emoji", "Segoe UI Emoji"; + background: lavender; +} +.sb-announcement { + background: rgb(131, 131, 131); +} +.sb-announcement__inner { + background: black; + color: white; +} +.sb-header { + background: lightskyblue; +} +.sb-header__inner { + background: royalblue; + color: white; +} +.sb-header-secondary { + background: lightcyan; +} +.sb-header-secondary__inner { + background: cornflowerblue; + color: white; +} +.sb-sidebar-primary { + background: lightgreen; +} +.sb-main { + background: blanchedalmond; +} +.sb-main__inner { + background: antiquewhite; +} +.sb-header-article { + background: lightsteelblue; +} +.sb-article-container { + background: snow; +} +.sb-article-main { + background: white; +} +.sb-footer-article { + background: lightpink; +} +.sb-sidebar-secondary { + background: lightgoldenrodyellow; +} +.sb-footer-content { + background: plum; +} +.sb-footer-content__inner { + background: palevioletred; +} +.sb-footer { + background: pink; +} +.sb-footer__inner { + background: salmon; +} +.sb-article { + background: white; +} diff --git a/_static/doctools.js b/_static/doctools.js index c3db08d1c..d06a71d75 100644 --- a/_static/doctools.js +++ b/_static/doctools.js @@ -4,12 +4,19 @@ * * Base JavaScript utilities for all Sphinx HTML documentation. * - * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ "use strict"; +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + const _ready = (callback) => { if (document.readyState !== "loading") { callback(); @@ -18,73 +25,11 @@ const _ready = (callback) => { } }; -/** - * highlight a given string on a node by wrapping it in - * span elements with the given class name. 
- */ -const _highlight = (node, addItems, text, className) => { - if (node.nodeType === Node.TEXT_NODE) { - const val = node.nodeValue; - const parent = node.parentNode; - const pos = val.toLowerCase().indexOf(text); - if ( - pos >= 0 && - !parent.classList.contains(className) && - !parent.classList.contains("nohighlight") - ) { - let span; - - const closestNode = parent.closest("body, svg, foreignObject"); - const isInSVG = closestNode && closestNode.matches("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.classList.add(className); - } - - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - parent.insertBefore( - span, - parent.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling - ) - ); - node.nodeValue = val.substr(0, pos); - - if (isInSVG) { - const rect = document.createElementNS( - "http://www.w3.org/2000/svg", - "rect" - ); - const bbox = parent.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute("class", className); - addItems.push({ parent: parent, target: rect }); - } - } - } else if (node.matches && !node.matches("button, select, textarea")) { - node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); - } -}; -const _highlightText = (thisNode, text, className) => { - let addItems = []; - _highlight(thisNode, addItems, text, className); - addItems.forEach((obj) => - obj.parent.insertAdjacentElement("beforebegin", obj.target) - ); -}; - /** * Small JavaScript module for the documentation. */ const Documentation = { init: () => { - Documentation.highlightSearchWords(); Documentation.initDomainIndexTable(); Documentation.initOnKeyListeners(); }, @@ -126,51 +71,6 @@ const Documentation = { Documentation.LOCALE = catalog.locale; }, - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords: () => { - const highlight = - new URLSearchParams(window.location.search).get("highlight") || ""; - const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); - if (terms.length === 0) return; // nothing to do - - // There should never be more than one element matching "div.body" - const divBody = document.querySelectorAll("div.body"); - const body = divBody.length ? 
divBody[0] : document.querySelector("body"); - window.setTimeout(() => { - terms.forEach((term) => _highlightText(body, term, "highlighted")); - }, 10); - - const searchBox = document.getElementById("searchbox"); - if (searchBox === null) return; - searchBox.appendChild( - document - .createRange() - .createContextualFragment( - '" - ) - ); - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords: () => { - document - .querySelectorAll("#searchbox .highlight-link") - .forEach((el) => el.remove()); - document - .querySelectorAll("span.highlighted") - .forEach((el) => el.classList.remove("highlighted")); - const url = new URL(window.location); - url.searchParams.delete("highlight"); - window.history.replaceState({}, "", url); - }, - /** * helper function to focus on search bar */ @@ -210,15 +110,11 @@ const Documentation = { ) return; - const blacklistedElements = new Set([ - "TEXTAREA", - "INPUT", - "SELECT", - "BUTTON", - ]); document.addEventListener("keydown", (event) => { - if (blacklistedElements.has(document.activeElement.tagName)) return; // bail for input elements - if (event.altKey || event.ctrlKey || event.metaKey) return; // bail with special keys + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; if (!event.shiftKey) { switch (event.key) { @@ -240,10 +136,6 @@ const Documentation = { event.preventDefault(); } break; - case "Escape": - if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; - Documentation.hideSearchWords(); - event.preventDefault(); } } diff --git a/_static/documentation_options.js b/_static/documentation_options.js index fa84404f0..96fcefe3e 100644 --- a/_static/documentation_options.js +++ b/_static/documentation_options.js @@ -1,14 +1,14 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '1.11.2', - LANGUAGE: 'en', - COLLAPSE_INDEX: false, - BUILDER: 'html', - FILE_SUFFIX: '.html', - LINK_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: true, - SHOW_SEARCH_SUMMARY: true, - ENABLE_SEARCH_SHORTCUTS: false, +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '1.14.0', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, }; \ No newline at end of file diff --git a/_static/graphviz.css b/_static/graphviz.css index 19e7afd38..8d81c02ed 100644 --- a/_static/graphviz.css +++ b/_static/graphviz.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- graphviz extension. * - * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ diff --git a/_static/jquery-3.6.0.js b/_static/jquery-3.6.0.js deleted file mode 100644 index fc6c299b7..000000000 --- a/_static/jquery-3.6.0.js +++ /dev/null @@ -1,10881 +0,0 @@ -/*! 
- * jQuery JavaScript Library v3.6.0 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright OpenJS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2021-03-02T17:08Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var flat = arr.flat ? function( array ) { - return arr.flat.call( array ); -} : function( array ) { - return arr.concat.apply( [], array ); -}; - - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - -var isFunction = function isFunction( obj ) { - - // Support: Chrome <=57, Firefox <=52 - // In some browsers, typeof returns "function" for HTML elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). - // We don't want to classify *any* DOM node as a function. - // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 - // Plus for old WebKit, typeof returns "function" for HTML collections - // (e.g., `typeof document.getElementsByTagName("div") === "function"`). (gh-4756) - return typeof obj === "function" && typeof obj.nodeType !== "number" && - typeof obj.item !== "function"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - -var document = window.document; - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. - // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. 
- // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. - val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.6.0", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - even: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return ( i + 1 ) % 2; - } ) ); - }, - - odd: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return i % 2; - } ) ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script in a provided context; falls back to the global one - // if not specified. 
- globalEval: function( code, options, doc ) { - DOMEval( code, { nonce: options && options.nonce }, doc ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return flat( ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), - function( _i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); - } ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.6 - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://js.foundation/ - * - * Date: 2021-02-16 - */ -( function( window ) { -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ( {} ).hasOwnProperty, - arr = [], - pop = arr.pop, - pushNative = arr.push, - push = arr.push, - slice = arr.slice, - - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[ i ] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + - "ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram - identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + - "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - - // "Attribute values must be CSS identifiers [capture 5] - // or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + - whitespace + "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + - whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + - "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + - whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + - whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + - "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + - "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rhtml = /HTML$/i, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), - funescape = function( escape, nonHex ) { - var high = "0x" + escape.slice( 1 ) - 0x10000; - - return nonHex ? - - // Strip the backslash prefix from a non-hex escape sequence - nonHex : - - // Replace a hexadecimal escape sequence with the encoded Unicode code point - // Support: IE <=11+ - // For values outside the Basic Multilingual Plane (BMP), manually construct a - // surrogate pair - high < 0 ? 
- String.fromCharCode( high + 0x10000 ) : - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + - ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - ( arr = slice.call( preferredDoc.childNodes ) ), - preferredDoc.childNodes - ); - - // Support: Android<4.0 - // Detect silently failing push.apply - // eslint-disable-next-line no-unused-expressions - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - pushNative.apply( target, slice.call( els ) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - - // Can't trust NodeList.length - while ( ( target[ j++ ] = els[ i++ ] ) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? 
context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - setDocument( context ); - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { - - // ID selector - if ( ( m = match[ 1 ] ) ) { - - // Document context - if ( nodeType === 9 ) { - if ( ( elem = context.getElementById( m ) ) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && ( elem = newContext.getElementById( m ) ) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[ 2 ] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !nonnativeSelectorCache[ selector + " " ] && - ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && - - // Support: IE 8 only - // Exclude object elements - ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { - - newSelector = selector; - newContext = context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // The technique has to be used as well when a leading combinator is used - // as such selectors are not recognized by querySelectorAll. - // Thanks to Andrew Dupont for this technique. - if ( nodeType === 1 && - ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - - // We can use :scope instead of the ID hack if the browser - // supports it & if we're not changing the context. - if ( newContext !== context || !support.scope ) { - - // Capture the context ID, setting it first if necessary - if ( ( nid = context.getAttribute( "id" ) ) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", ( nid = expando ) ); - } - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + - toSelector( groups[ i ] ); - } - newSelector = groups.join( "," ); - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return ( cache[ key + " " ] = value ); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement( "fieldset" ); - - try { - return !!fn( el ); - } catch ( e ) { - return false; - } finally { - - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split( "|" ), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[ i ] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( ( cur = cur.nextSibling ) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return ( name === "input" || name === "button" ) && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction( function( argument ) { - argument = +argument; - return markFunction( function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ ( j = matchIndexes[ i ] ) ] ) { - seed[ j ] = !( matches[ j ] = seed[ j ] ); - } - } - } ); - } ); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - var namespace = elem && elem.namespaceURI, - docElem = elem && ( elem.ownerDocument || elem ).documentElement; - - // Support: IE <=8 - // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes - // https://bugs.jquery.com/ticket/4833 - return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9 - 11+, Edge 12 - 18+ - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( preferredDoc != document && - ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, - // Safari 4 - 5 only, Opera <=11.6 - 12.x only - // IE/Edge & older browsers don't support the :scope pseudo-class. - // Support: Safari 6.0 only - // Safari 6.0 supports :scope but it's an alias of :root there. 
- support.scope = assert( function( el ) { - docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); - return typeof el.querySelectorAll !== "undefined" && - !el.querySelectorAll( ":scope fieldset div" ).length; - } ); - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert( function( el ) { - el.className = "i"; - return !el.getAttribute( "className" ); - } ); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert( function( el ) { - el.appendChild( document.createComment( "" ) ); - return !el.getElementsByTagName( "*" ).length; - } ); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert( function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - } ); - - // ID filter and find - if ( support.getById ) { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute( "id" ) === attrId; - }; - }; - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode( "id" ); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( ( elem = elems[ i++ ] ) ) { - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find[ "TAG" ] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { - - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert( function( el ) { - - var input; - - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll( "[selected]" ).length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push( "~=" ); - } - - // Support: IE 11+, Edge 15 - 18+ - // IE 11/Edge don't find elements on a `[name='']` query in some cases. - // Adding a temporary attribute to the document before the selection works - // around the issue. - // Interestingly, IE 10 & older don't seem to have the issue. 
- input = document.createElement( "input" ); - input.setAttribute( "name", "" ); - el.appendChild( input ); - if ( !el.querySelectorAll( "[name='']" ).length ) { - rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + - whitespace + "*(?:''|\"\")" ); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll( ":checked" ).length ) { - rbuggyQSA.push( ":checked" ); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push( ".#.+[+~]" ); - } - - // Support: Firefox <=3.6 - 5 only - // Old Firefox doesn't throw on a badly-escaped identifier. - el.querySelectorAll( "\\\f" ); - rbuggyQSA.push( "[\\r\\n\\f]" ); - } ); - - assert( function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement( "input" ); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - // Enforce case-sensitivity of name attribute - if ( el.querySelectorAll( "[name=d]" ).length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: Opera 10 - 11 only - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll( "*,:x" ); - rbuggyQSA.push( ",.*:" ); - } ); - } - - if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector ) ) ) ) { - - assert( function( el ) { - - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - } ); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? 
- adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - ) ); - } : - function( a, b ) { - if ( b ) { - while ( ( b = b.parentNode ) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { - - // Choose the first element that is related to our preferred document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( a == document || a.ownerDocument == preferredDoc && - contains( preferredDoc, a ) ) { - return -1; - } - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( b == document || b.ownerDocument == preferredDoc && - contains( preferredDoc, b ) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? -1 : 1; - } : - function( a, b ) { - - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - /* eslint-disable eqeqeq */ - return a == document ? -1 : - b == document ? 1 : - /* eslint-enable eqeqeq */ - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( ( cur = cur.parentNode ) ) { - ap.unshift( cur ); - } - cur = b; - while ( ( cur = cur.parentNode ) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[ i ] === bp[ i ] ) { - i++; - } - - return i ? - - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[ i ], bp[ i ] ) : - - // Otherwise nodes in our document sort first - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. 
- /* eslint-disable eqeqeq */ - ap[ i ] == preferredDoc ? -1 : - bp[ i ] == preferredDoc ? 1 : - /* eslint-enable eqeqeq */ - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - setDocument( elem ); - - if ( support.matchesSelector && documentIsHTML && - !nonnativeSelectorCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch ( e ) { - nonnativeSelectorCache( expr, true ); - } - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( context.ownerDocument || context ) != document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( elem.ownerDocument || elem ) != document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - ( val = elem.getAttributeNode( name ) ) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return ( sel + "" ).replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - - // If no nodeType, this is expected to be an array - while ( ( node = elem[ i++ ] ) ) { - - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[ 1 ] = match[ 1 ].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[ 3 ] = ( match[ 3 ] || match[ 4 ] || - match[ 5 ] || "" ).replace( runescape, funescape ); - - if ( match[ 2 ] === "~=" ) { - match[ 3 ] = " " + match[ 3 ] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[ 1 ] = match[ 1 ].toLowerCase(); - - if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { - - // nth-* requires argument - if ( !match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[ 4 ] = +( match[ 4 ] ? 
- match[ 5 ] + ( match[ 6 ] || 1 ) : - 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); - match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); - - // other types prohibit arguments - } else if ( match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[ 6 ] && match[ 2 ]; - - if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[ 3 ] ) { - match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - - // Get excess from tokenize (recursively) - ( excess = tokenize( unquoted, true ) ) && - - // advance to the next closing parenthesis - ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { - - // excess is a negative index - match[ 0 ] = match[ 0 ].slice( 0, excess ); - match[ 2 ] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { - return true; - } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - ( pattern = new RegExp( "(^|" + whitespace + - ")" + className + "(" + whitespace + "|$)" ) ) && classCache( - className, function( elem ) { - return pattern.test( - typeof elem.className === "string" && elem.className || - typeof elem.getAttribute !== "undefined" && - elem.getAttribute( "class" ) || - "" - ); - } ); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - /* eslint-disable max-len */ - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - /* eslint-enable max-len */ - - }; - }, - - "CHILD": function( type, what, _argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, _context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( ( node = node[ dir ] ) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( ( node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - - // Use previously-cached element index if available - if ( useCache ) { - - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - - // Use the same loop as above to seek `elem` from the start - while ( ( node = ++nodeIndex && node && node[ dir ] || - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || - ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction( function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[ i ] ); - seed[ idx ] = !( matches[ idx ] = matched[ i ] ); - } - } ) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - - // Potentially complex pseudos - "not": markFunction( function( selector ) { - - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction( function( seed, matches, _context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( ( elem = unmatched[ i ] ) ) { - seed[ i ] = !( matches[ i ] = elem ); - } - } - } ) : - function( elem, _context, xml ) { - input[ 0 ] = elem; - matcher( input, null, xml, results ); - - // Don't keep the element (issue #299) - input[ 0 ] = null; - return !results.pop(); - }; - } ), - - "has": markFunction( function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - } ), - - "contains": markFunction( function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; - }; - } ), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - - // lang value must be a valid identifier - if ( !ridentifier.test( lang || "" ) ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( ( elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); - return false; - }; - } ), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && - ( !document.hasFocus || document.hasFocus() ) && - !!( elem.type || elem.href || ~elem.tabIndex ); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return ( nodeName === "input" && !!elem.checked ) || - ( nodeName === "option" && !!elem.selected ); - }, - - "selected": function( elem ) { - - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - // eslint-disable-next-line no-unused-expressions - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos[ "empty" ]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( ( attr = elem.getAttribute( "type" ) ) == null || - attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo( function() { - return [ 0 ]; - } ), - - "last": createPositionalPseudo( function( _matchIndexes, length ) { - return [ length - 1 ]; - } ), - - "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - } ), - - "even": createPositionalPseudo( function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "odd": createPositionalPseudo( function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? - argument + length : - argument > length ? 
- length : - argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ) - } -}; - -Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || ( match = rcomma.exec( soFar ) ) ) { - if ( match ) { - - // Don't consume trailing commas as valid - soFar = soFar.slice( match[ 0 ].length ) || soFar; - } - groups.push( ( tokens = [] ) ); - } - - matched = false; - - // Combinators - if ( ( match = rcombinators.exec( soFar ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - - // Cast descendant combinators to space - type: match[ 0 ].replace( rtrim, " " ) - } ); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || - ( match = preFilters[ type ]( match ) ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - type: type, - matches: match - } ); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[ i ].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || ( elem[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || - ( outerCache[ elem.uniqueID ] = {} ); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( ( oldCache = uniqueCache[ key ] ) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return ( newCache[ 2 ] = oldCache[ 2 ] ); - } else { - - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[ i ]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[ 0 ]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[ i ], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( ( elem = unmatched[ i ] ) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction( function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( - selector || "*", - context.nodeType ? [ context ] : context, - [] - ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( ( elem = temp[ i ] ) ) { - matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) ) { - - // Restore matcherIn since elem is not yet a final match - temp.push( ( matcherIn[ i ] = elem ) ); - } - } - postFinder( null, ( matcherOut = [] ), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) && - ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { - - seed[ temp ] = !( results[ temp ] = elem ); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - } ); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[ 0 ].type ], - implicitRelative = leadingRelative || Expr.relative[ " " ], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - ( checkContext = context ).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { - matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; - } else { - matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[ j ].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens - .slice( 0, i - 1 ) - .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), - - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), - len = elems.length; - - if ( outermost ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - outermostContext = context == document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( !context && elem.ownerDocument != document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( ( matcher = elementMatchers[ j++ ] ) ) { - if ( matcher( elem, context || document, xml ) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - - // They will have gone through all possible matchers - if ( ( elem = !matcher && elem ) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. 
- if ( bySet && i !== matchedCount ) { - j = 0; - while ( ( matcher = setMatchers[ j++ ] ) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !( unmatched[ i ] || setMatched[ i ] ) ) { - setMatched[ i ] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? - markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[ i ] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( - selector, - matcherFromGroupMatchers( elementMatchers, setMatchers ) - ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( ( selector = compiled.selector || selector ) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[ 0 ] = match[ 0 ].slice( 0 ); - if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { - - context = ( Expr.find[ "ID" ]( token.matches[ 0 ] - .replace( runescape, funescape ), context ) || [] )[ 0 ]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[ i ]; - - // Abort if we hit a combinator - if ( Expr.relative[ ( type = token.type ) ] ) { - break; - } - if ( ( find = Expr.find[ type ] ) ) { - - // Search, expanding context for leading sibling combinators - if ( ( seed = find( - token.matches[ 0 ].replace( runescape, funescape ), - rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) || - context - ) ) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert( function( el ) { - - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1; -} ); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert( function( el ) { - el.innerHTML = ""; - return el.firstChild.getAttribute( "href" ) === "#"; -} ) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - } ); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert( function( el ) { - el.innerHTML = ""; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -} ) ) { - addHandle( "value", function( elem, _name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - } ); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert( function( el ) { - return el.getAttribute( "disabled" ) == null; -} ) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - ( val = elem.getAttributeNode( name ) ) && val.specified ? 
- val.value : - null; - } - } ); -} - -return Sizzle; - -} )( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -} -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Filtered directly for both simple and complex selectors - return jQuery.filter( qualifier, elements, not ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
- jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, _i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, _i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, _i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( elem.contentDocument != null && - - // Support: IE 11+ - // elements with no `data` attribute has an object - // `contentDocument` with a `null` prototype. - getProto( elem.contentDocument ) ) { - - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && toType( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( _i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the primary Deferred - primary = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - primary.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( primary.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return primary.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); - } - - return primary.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
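/*
 * Usage sketch for the facilities implemented above (jQuery.Callbacks, jQuery.Deferred,
 * jQuery.when). Illustration of the public API only, not part of the original jQuery source.
 *
 *     // Callbacks: a managed list of handlers; flags tune its behavior
 *     var callbacks = jQuery.Callbacks( "once memory" );
 *     callbacks.add( function( msg ) { console.log( "got " + msg ); } );
 *     callbacks.fire( "ready" );                                        // logs "got ready"
 *     callbacks.add( function( msg ) { console.log( "late " + msg ); } ); // "memory" replays: logs "late ready"
 *
 *     // Deferred: resolve once; .then() chains handlers Promises/A+-style
 *     var defer = jQuery.Deferred();
 *     defer.then( function( v ) { return v + 1; } )
 *         .done( function( v ) { console.log( v ); } );
 *     defer.resolve( 41 );                                              // logs 42
 *
 *     // when: aggregate several deferreds, like Promise.all
 *     jQuery.when( defer, jQuery.Deferred().resolve( "x" ) )
 *         .done( function( a, b ) { console.log( a, b ); } );           // 41 "x"
 */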
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, _key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( _all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (#9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? 
value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
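/*
 * Usage sketch for the data API wired up here and in jQuery.fn.data below. Illustration of
 * the public API only, not part of the original jQuery source.
 *
 *     var elem = jQuery( "<div data-count='3'></div>" );
 *     elem.data( "count" );               // 3 — read from the data-* attribute, converted by getData
 *     elem.data( "user", { id: 1 } );     // stored in the dataUser cache, not written back as an attribute
 *     jQuery.hasData( elem[ 0 ] );        // true
 *     elem.removeData( "user" );
 */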
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var documentElement = document.documentElement; - - - - var isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ); - }, - composed = { composed: true }; - - // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only - // Check attachment across shadow DOM boundaries when possible (gh-3504) - // Support: iOS 10.0-10.2 only - // Early iOS 10 versions support `attachShadow` but not `getRootNode`, - // leading to errors. We need to check for `getRootNode`. - if ( documentElement.getRootNode ) { - isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ) || - elem.getRootNode( composed ) === elem.ownerDocument; - }; - } -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - isAttached( elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, scale, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = elem.nodeType && - ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Support: Firefox <=54 - // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) - initial = initial / 2; - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - while ( maxIterations-- ) { - - // Evaluate and update our best guess (doubling guesses that zero out). - // Finish if the scale equals or crosses 1 (making the old*new product non-positive). - jQuery.style( elem, prop, initialInUnit + unit ); - if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { - maxIterations = 0; - } - initialInUnit = initialInUnit / scale; - - } - - initialInUnit = initialInUnit * 2; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? 
this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); - -var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); - - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps (WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; - - // Support: IE <=9 only - // IE <=9 replaces "; - support.option = !!div.lastChild; -} )(); - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - -// Support: IE <=9 only -if ( !support.option ) { - wrapMap.optgroup = wrapMap.option = [ 1, "" ]; -} - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, attached, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( toType( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - attached = isAttached( elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( attached ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -var rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 - 11+ -// focus() and blur() are asynchronous, except when they are no-op. 
-// So expect focus to be synchronous when the element is already active, -// and blur to be synchronous when the element is not already active. -// (focus and blur are always synchronous in other supported browsers, -// this just defines when we can count on it). -function expectSync( elem, type ) { - return ( elem === safeActiveElement() ) === ( type === "focus" ); -} - -// Support: IE <=9 only -// Accessing document.activeElement can throw unexpectedly -// https://bugs.jquery.com/ticket/13393 -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. - */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Only attach events to objects that accept data - if ( !acceptData( elem ) ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = Object.create( null ); - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
- jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." ) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( nativeEvent ), - - handlers = ( - dataPriv.get( this, "events" ) || Object.create( null ) - )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // If the event is namespaced, then each handler is only invoked if it is - // specially universal or its namespaces are a superset of the event's. 
- if ( !event.rnamespace || handleObj.namespace === false || - event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? - originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - - // Utilize native event to ensure correct state for checkable inputs - setup: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. 
- var el = this || data; - - // Claim the first handler - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - // dataPriv.set( el, "click", ... ) - leverageNative( el, "click", returnTrue ); - } - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Force setup before triggering a click - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - leverageNative( el, "click" ); - } - - // Return non-false to allow normal event-path propagation - return true; - }, - - // For cross-browser consistency, suppress native .click() on links - // Also prevent it if we're currently inside a leveraged native-event stack - _default: function( event ) { - var target = event.target; - return rcheckableType.test( target.type ) && - target.click && nodeName( target, "input" ) && - dataPriv.get( target, "click" ) || - nodeName( target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -// Ensure the presence of an event listener that handles manually-triggered -// synthetic events by interrupting progress until reinvoked in response to -// *native* events that it fires directly, ensuring that state changes have -// already occurred before other listeners are invoked. -function leverageNative( el, type, expectSync ) { - - // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add - if ( !expectSync ) { - if ( dataPriv.get( el, type ) === undefined ) { - jQuery.event.add( el, type, returnTrue ); - } - return; - } - - // Register the controller as a special universal handler for all event namespaces - dataPriv.set( el, type, false ); - jQuery.event.add( el, type, { - namespace: false, - handler: function( event ) { - var notAsync, result, - saved = dataPriv.get( this, type ); - - if ( ( event.isTrigger & 1 ) && this[ type ] ) { - - // Interrupt processing of the outer synthetic .trigger()ed event - // Saved data should be false in such cases, but might be a leftover capture object - // from an async native handler (gh-4350) - if ( !saved.length ) { - - // Store arguments for use when handling the inner native event - // There will always be at least one argument (an event object), so this array - // will not be confused with a leftover capture object. - saved = slice.call( arguments ); - dataPriv.set( this, type, saved ); - - // Trigger the native event and capture its result - // Support: IE <=9 - 11+ - // focus() and blur() are asynchronous - notAsync = expectSync( this, type ); - this[ type ](); - result = dataPriv.get( this, type ); - if ( saved !== result || notAsync ) { - dataPriv.set( this, type, false ); - } else { - result = {}; - } - if ( saved !== result ) { - - // Cancel the outer synthetic event - event.stopImmediatePropagation(); - event.preventDefault(); - - // Support: Chrome 86+ - // In Chrome, if an element having a focusout handler is blurred by - // clicking outside of it, it invokes the handler synchronously. 
If - // that handler calls `.remove()` on the element, the data is cleared, - // leaving `result` undefined. We need to guard against this. - return result && result.value; - } - - // If this is an inner synthetic event for an event with a bubbling surrogate - // (focus or blur), assume that the surrogate already propagated from triggering the - // native event and prevent that from happening again here. - // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the - // bubbling surrogate propagates *after* the non-bubbling base), but that seems - // less bad than duplication. - } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { - event.stopPropagation(); - } - - // If this is a native event triggered above, everything is now in order - // Fire an inner synthetic event with the original arguments - } else if ( saved.length ) { - - // ...and capture the result - dataPriv.set( this, type, { - value: jQuery.event.trigger( - - // Support: IE <=9 - 11+ - // Extend with the prototype to reset the above stopImmediatePropagation() - jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), - saved.slice( 1 ), - this - ) - } ); - - // Abort handling of the native event - event.stopImmediatePropagation(); - } - } - } ); -} - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || Date.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - code: true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - which: true -}, jQuery.event.addProp ); - -jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { - jQuery.event.special[ type ] = { - - // Utilize native event if possible so blur/focus sequence is correct - setup: function() { - - // Claim the first handler - // dataPriv.set( this, "focus", ... ) - // dataPriv.set( this, "blur", ... ) - leverageNative( this, type, expectSync ); - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function() { - - // Force setup before trigger - leverageNative( this, type ); - - // Return non-false to allow normal event-path propagation - return true; - }, - - // Suppress native focus or blur as it's already being fired - // in leverageNative. - _default: function() { - return true; - }, - - delegateType: delegateType - }; -} ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). 
-jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. - // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - // Support: IE <=10 - 11, Edge 12 - 13 only - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( elem ).children( "tbody" )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { - elem.type = elem.type.slice( 5 ); - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. - if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.get( src ); - events = pdataOld.events; - - if ( events ) { - dataPriv.remove( dest, "handle events" ); - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. 
Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. - if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = flat( args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - valueIsFunction = isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( valueIsFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( valueIsFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl && !node.noModule ) { - jQuery._evalUrl( node.src, { - nonce: node.nonce || node.getAttribute( "nonce" ) - }, doc ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && isAttached( node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html; - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = isAttached( elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? 
this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - -var swap = function( elem, options, callback ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.call( elem ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - -var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. - function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - container.style.cssText = "position:absolute;left:-11111px;width:60px;" + - "margin-top:1px;padding:0;border:0"; - div.style.cssText = - "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + - "margin:auto;border:1px;padding:1px;" + - "width:60%;top:1%"; - documentElement.appendChild( container ).appendChild( div ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; - - // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 - // Some styles come back with percentage values, even though they shouldn't - div.style.right = "60%"; - pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; - - // Support: IE 9 - 11 only - // Detect misreporting of content dimensions for box-sizing:border-box elements - boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; - - // Support: IE 9 only - // Detect overflow:scroll screwiness (gh-3699) - // Support: Chrome <=64 - // Don't get tricked when zoom affects offsetWidth (gh-4029) - div.style.position = "absolute"; - scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - function roundPixelMeasures( measure ) { - return Math.round( parseFloat( measure ) ); - } - - var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableTrDimensionsVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - 
support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - jQuery.extend( support, { - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelBoxStyles: function() { - computeStyleTests(); - return pixelBoxStylesVal; - }, - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - }, - scrollboxSize: function() { - computeStyleTests(); - return scrollboxSizeVal; - }, - - // Support: IE 9 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Behavior in IE 9 is more subtle than in newer versions & it passes - // some versions of this test; make sure not to make it pass there! - // - // Support: Firefox 70+ - // Only Firefox includes border widths - // in computed dimensions. (gh-4529) - reliableTrDimensions: function() { - var table, tr, trChild, trStyle; - if ( reliableTrDimensionsVal == null ) { - table = document.createElement( "table" ); - tr = document.createElement( "tr" ); - trChild = document.createElement( "div" ); - - table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; - tr.style.cssText = "border:1px solid"; - - // Support: Chrome 86+ - // Height set through cssText does not get applied. - // Computed height then comes back as 0. - tr.style.height = "1px"; - trChild.style.height = "9px"; - - // Support: Android 8 Chrome 86+ - // In our bodyBackground.html iframe, - // display for all div elements is set to "inline", - // which causes a problem only in Android 8 Chrome 86. - // Ensuring the div is display: block - // gets around this issue. - trChild.style.display = "block"; - - documentElement - .appendChild( table ) - .appendChild( tr ) - .appendChild( trChild ); - - trStyle = window.getComputedStyle( tr ); - reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + - parseInt( trStyle.borderTopWidth, 10 ) + - parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; - - documentElement.removeChild( table ); - } - return reliableTrDimensionsVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !isAttached( elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. 
- // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style, - vendorProps = {}; - -// Return a vendor-prefixed property or undefined -function vendorPropName( name ) { - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a potentially-mapped jQuery.cssProps or vendor prefixed property -function finalPropName( name ) { - var final = jQuery.cssProps[ name ] || vendorProps[ name ]; - - if ( final ) { - return final; - } - if ( name in emptyStyle ) { - return name; - } - return vendorProps[ name ] = vendorPropName( name ) || name; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }; - -function setPositiveNumber( _elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { - var i = dimension === "width" ? 1 : 0, - extra = 0, - delta = 0; - - // Adjustment may not be necessary - if ( box === ( isBorderBox ? 
"border" : "content" ) ) { - return 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin - if ( box === "margin" ) { - delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); - } - - // If we get here with a content-box, we're seeking "padding" or "border" or "margin" - if ( !isBorderBox ) { - - // Add padding - delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // For "border" or "margin", add border - if ( box !== "padding" ) { - delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - - // But still keep track of it otherwise - } else { - extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - - // If we get here with a border-box (content + padding + border), we're seeking "content" or - // "padding" or "margin" - } else { - - // For "content", subtract padding - if ( box === "content" ) { - delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // For "content" or "padding", subtract border - if ( box !== "margin" ) { - delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - // Account for positive content-box scroll gutter when requested by providing computedVal - if ( !isBorderBox && computedVal >= 0 ) { - - // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border - // Assuming integer scroll gutter, subtract the rest and round down - delta += Math.max( 0, Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - computedVal - - delta - - extra - - 0.5 - - // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter - // Use an explicit zero to avoid NaN (gh-3964) - ) ) || 0; - } - - return delta; -} - -function getWidthOrHeight( elem, dimension, extra ) { - - // Start with computed style - var styles = getStyles( elem ), - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). - // Fake content-box until we know it's needed to know the true value. - boxSizingNeeded = !support.boxSizingReliable() || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - valueIsBorderBox = isBorderBox, - - val = curCSS( elem, dimension, styles ), - offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); - - // Support: Firefox <=54 - // Return a confounding non-pixel value or feign ignorance, as appropriate. - if ( rnumnonpx.test( val ) ) { - if ( !extra ) { - return val; - } - val = "auto"; - } - - - // Support: IE 9 - 11 only - // Use offsetWidth/offsetHeight for when box sizing is unreliable. - // In those cases, the computed value can be trusted to be border-box. - if ( ( !support.boxSizingReliable() && isBorderBox || - - // Support: IE 10 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
- !support.reliableTrDimensions() && nodeName( elem, "tr" ) || - - // Fall back to offsetWidth/offsetHeight when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - val === "auto" || - - // Support: Android <=4.1 - 4.3 only - // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) - !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && - - // Make sure the element is visible & connected - elem.getClientRects().length ) { - - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Where available, offsetWidth/offsetHeight approximate border box dimensions. - // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the - // retrieved value as a content box dimension. - valueIsBorderBox = offsetProp in elem; - if ( valueIsBorderBox ) { - val = elem[ offsetProp ]; - } - } - - // Normalize "" and auto - val = parseFloat( val ) || 0; - - // Adjust for the element's box model - return ( val + - boxModelAdjustment( - elem, - dimension, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles, - - // Provide the current computed size to request scroll gutter calculation (gh-3589) - val - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "gridArea": true, - "gridColumn": true, - "gridColumnEnd": true, - "gridColumnStart": true, - "gridRow": true, - "gridRowEnd": true, - "gridRowStart": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: {}, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append - // "px" to a few hardcoded values. - if ( type === "number" && !isCustomProp ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( _i, dimension ) { - jQuery.cssHooks[ dimension ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. 
- ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, dimension, extra ); - } ) : - getWidthOrHeight( elem, dimension, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = getStyles( elem ), - - // Only read styles.position if the test has a chance to fail - // to avoid forcing a reflow. - scrollboxSizeBuggy = !support.scrollboxSize() && - styles.position === "absolute", - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) - boxSizingNeeded = scrollboxSizeBuggy || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - subtract = extra ? - boxModelAdjustment( - elem, - dimension, - extra, - isBorderBox, - styles - ) : - 0; - - // Account for unreliable border-box dimensions by comparing offset* to computed and - // faking a content-box to get border and padding (gh-3699) - if ( isBorderBox && scrollboxSizeBuggy ) { - subtract -= Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - parseFloat( styles[ dimension ] ) - - boxModelAdjustment( elem, dimension, "border", false, styles ) - - 0.5 - ); - } - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ dimension ] = value; - value = jQuery.css( elem, dimension ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( prefix !== "margin" ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? - hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && ( - jQuery.cssHooks[ tween.prop ] || - tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = Date.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 15 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY and Edge just mirrors - // the overflowX value there. 
- opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - result.stop.bind( result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = Date.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -function classesToArray( value ) { - if ( Array.isArray( value ) ) { - return value; - } - if ( typeof value === "string" ) { - return value.match( rnothtmlwhite ) || []; - } - return []; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isValidValue = type === "string" || Array.isArray( value ); - - if ( typeof stateVal === "boolean" && isValidValue ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( isValidValue ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = classesToArray( value ); - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, valueIsFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - valueIsFunction = isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( valueIsFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -support.focusin = "onfocusin" in window; - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - stopPropagationCallback = function( e ) { - e.stopPropagation(); - }; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = lastElement = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? 
- event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - lastElement = cur; - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - - if ( event.isPropagationStopped() ) { - lastElement.addEventListener( type, stopPropagationCallback ); - } - - elem[ type ](); - - if ( event.isPropagationStopped() ) { - lastElement.removeEventListener( type, stopPropagationCallback ); - } - - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - - // Handle: regular nodes (via `this.ownerDocument`), window - // (via `this.document`) & document (via `this`). - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = { guid: Date.now() }; - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml, parserErrorElem; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. - try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) {} - - parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; - if ( !xml || parserErrorElem ) { - jQuery.error( "Invalid XML: " + ( - parserErrorElem ? 
- jQuery.map( parserErrorElem.childNodes, function( el ) { - return el.textContent; - } ).join( "\n" ) : - data - ) ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && toType( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - if ( a == null ) { - return ""; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ).filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ).map( function( _i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - -originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } - } 
); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If prev 
can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() + " " ] = - ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) - .concat( match[ 2 ] ); - } - } - match = responseHeaders[ key.toLowerCase() + " " ]; - } - return match == null ? null : match.join( ", " ); - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 15 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + - uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Use a noop converter for missing script but not if jsonp - if ( !isSuccess && - jQuery.inArray( "script", s.dataTypes ) > -1 && - jQuery.inArray( "json", s.dataTypes ) < 0 ) { - s.converters[ "text script" ] = function() {}; - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( _i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - -jQuery.ajaxPrefilter( function( s ) { - var i; - for ( i in s.headers ) { - if ( i.toLowerCase() === "content-type" ) { - s.contentType = s.headers[ i ] || ""; - } - } -} ); - - -jQuery._evalUrl = function( url, options, doc ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options, doc ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? - { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " -{% endmacro %} \ No newline at end of file diff --git a/api_summary.html b/api_summary.html index 53188c4eb..0b5c0c186 100644 --- 
a/api_summary.html +++ b/api_summary.html @@ -1,1006 +1,1116 @@ - - - - - - - - - API Summary — sklearn-onnx 1.11.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -
- - -
- - - - - - - - - - - -
- -
- -
-

API Summary#

-

Summary of public functions and classes exposed -in scikit-onnx.

- -
-

Version#

-
-
-skl2onnx.get_latest_tested_opset_version()[source]#
-

This module relies on onnxruntime to test every -converter. The function returns the most recent -target opset tested with onnxruntime or the opset -version specified by onnx package if this one is lower -(return by onnx.defs.onnx_opset_version()).

-
- -
-
-

Converters#

-

Both functions convert a scikit-learn model into ONNX. -The first one lets the user manually -define the input’s name and types. The second one -infers this information from the training data. -These two functions are the main entry points to converter. -The rest of the API is needed if a model has no converter -implemented in this package. A new converter has then to be -registered, whether it is imported from another package -or created from scratch.

-
-
-skl2onnx.convert_sklearn(model, name=None, initial_types=None, doc_string='', target_opset=None, custom_conversion_functions=None, custom_shape_calculators=None, custom_parsers=None, options=None, intermediate=False, white_op=None, black_op=None, final_types=None, dtype=None, naming=None, verbose=0)[source]#
-

This function produces an equivalent -ONNX model of the given scikit-learn model. -The list of supported converters is returned by the function -supported_converters.

-

For pipeline conversion, user needs to make sure each component -is one of our supported items. -This function converts the specified scikit-learn model -into its ONNX counterpart. -Note that for all conversions, initial types are required. -ONNX model name can also be specified.

-
-
Parameters
-
    -
  • model – A scikit-learn model

  • -
  • initial_types – a python list. -Each element is a tuple of a variable name -and a type defined in data_types.py

  • -
  • name – The name of the graph (type: GraphProto) -in the produced ONNX model (type: ModelProto)

  • -
  • doc_string – A string attached onto the produced ONNX model

  • -
  • target_opset – number, for example, 7 for -ONNX 1.2, and 8 for ONNX 1.3, -if value is not specified, the function will -choose the latest tested opset -(see skl2onnx.get_latest_tested_opset_version())

  • -
  • custom_conversion_functions – a dictionary for -specifying the user customized conversion function, -it takes precedence over registered converters

  • -
  • custom_shape_calculators – a dictionary for -specifying the user customized shape calculator -it takes precedence over registered shape calculators.

  • -
  • custom_parsers – parsers determines which outputs -is expected for which particular task, -default parsers are defined for classifiers, -regressors, pipeline but they can be rewritten, -custom_parsers is a dictionary -{ type: fct_parser(scope, model, inputs, custom_parsers=None) }

  • -
  • options – specific options given to converters -(see Converters with options)

  • -
  • intermediate – if True, the function returns the -converted model and the instance of Topology used, -it returns the converted model otherwise

  • -
  • white_op – white list of ONNX nodes allowed -while converting a pipeline, -if empty, all are allowed

  • -
  • black_op – black list of ONNX nodes -allowed while converting a pipeline, -if empty, none are blacklisted

  • -
  • final_types – a python list. Works the same way as initial_types -but not mandatory, it is used to overwrites the type -(if type is not None) and the name of every output.

  • -
  • dtype – removed in version 1.7.5, dtype is -now inferred from input types, -converters may add operators Cast to switch -to double when it is necessary

  • -
  • naming – the user may want to change the way intermediate -are named, this parameter can be a string (a prefix) or a -function, which signature is the following: -get_name(name, existing_names), the library will then -check this name is unique and modify it if not

  • -
  • verbose – display progress while converting a model

  • -
-
-
Returns
-

An ONNX model (type: ModelProto) which is -equivalent to the input scikit-learn model

-
-
-

Example of initial_types: -Assume that the specified scikit-learn model takes -a heterogeneous list as its input. -If the first 5 elements are floats and the last 10 elements are integers, -we need to specify initial types as below. The [None] in -[None, 5] indicates the batch size here is unknown.

-
from skl2onnx.common.data_types import FloatTensorType, Int64TensorType
-initial_type = [('float_input', FloatTensorType([None, 5])),
-                ('int64_input', Int64TensorType([None, 10]))]
-
-
-
-

Note

-

If a pipeline includes an instance of -ColumnTransformer, -scikit-learn allows the user to specify columns by name. -This option is not supported -by sklearn-onnx as feature names could differ between -the input data and the ONNX graph -(defined by parameter initial_types); only integer indices are supported.

-
-

Some ONNX operators expose parameters sklearn-onnx cannot -guess from the raw model. Some default values are usually suggested, -but the user may have to overwrite them manually. This is -not obvious to do when a model is included in a pipeline. -That’s why these options can be given to the function convert_sklearn -as a dictionary {model_type: parameters in a dictionary} or -{model_id: parameters in a dictionary}. -The separators option is used to specify the delimiters between two words -when the ONNX graph needs to tokenize a string. -The default list is short and may not include all -the necessary values. It can be overwritten as:

-
extra = {TfidfVectorizer: {"separators": [' ', '[.]', '\\?',
-            ',', ';', ':', '\\!', '\\(', '\\)']}}
-model_onnx = convert_sklearn(
-    model, "tfidf",
-    initial_types=[("input", StringTensorType([None, 1]))],
-    options=extra)
-
-
-

But if a pipeline contains two models of the same class, -it is possible to distinguish between the two with the function id:

-
extra = {id(model): {"separators": [' ', '.', '\\?', ',', ';',
-            ':', '\\!', '\\(', '\\)']}}
-model_onnx = convert_sklearn(
-    pipeline, "pipeline-with-2-tfidf",
-    initial_types=[("input", StringTensorType([None, 1]))],
-    options=extra)
-
-
-

It is used in example TfIdfVectorizer with ONNX.

-
-

Changed in version 1.10.0: Parameter naming was added.

-
-
- -
-
-skl2onnx.to_onnx(model, X=None, name=None, initial_types=None, target_opset=None, options=None, white_op=None, black_op=None, final_types=None, dtype=None, naming=None, verbose=0)[source]#
-

Calls convert_sklearn() with simplified parameters.

-
-
Parameters
-
    -
  • model – model to convert

  • -
  • X – training set, can be None, it is used to infer the -input types (initial_types)

  • -
  • initial_types – if X is None, then initial_types must be -defined

  • -
  • target_opset – conversion with a specific target opset

  • -
  • options – specific options given to converters -(see Converters with options)

  • -
  • name – name of the model

  • -
  • white_op – white list of ONNX nodes allowed -while converting a pipeline, if empty, all are allowed

  • -
  • black_op – black list of ONNX nodes allowed -while converting a pipeline, if empty, none are blacklisted

  • -
  • final_types – a python list. Works the same way as initial_types -but not mandatory, it is used to overwrites the type -(if type is not None) and the name of every output.

  • -
  • dtype – removed in version 1.7.5, dtype is now inferred from -input types, converters may add operators Cast to switch to -double when it is necessary

  • -
  • naming – the user may want to change the way intermediate -are named, this parameter can be a string (a prefix) or a -function, which signature is the following: -get_name(name, existing_names), the library will then -check this name is unique and modify it if not

  • -
  • verbose – display progress while converting a model

  • -
-
-
Returns
-

converted model

-
-
-

This function checks whether the model inherits from class -OnnxOperatorMixin; in that case it calls its method to_onnx, -otherwise it calls convert_sklearn().

-
-

Changed in version 1.10.0: Parameter naming was added.

-
-
- -
-
-

Logging#

-

The conversion of a pipeline fails if it contains an object without any -associated converter. It may also fail if one of the objects is handled -by a custom converter. If the error message is not explicit enough, -it is possible to enable logging:

-
import logging
-logger = logging.getLogger('skl2onnx')
-logger.setLevel(logging.DEBUG)
-logging.basicConfig(level=logging.DEBUG)
-
-
-

Example Logging, verbose illustrates what it looks like.

-
-
-

Register a new converter#

-

If a model has no converter -implemented in this package, a new converter has then to be -registered, whether it is imported from another package -or created from scratch. Section Covered Converters -lists all available converters.

-
-
-skl2onnx.supported_converters(from_sklearn=False)[source]#
-

Returns the list of supported converters. -To find the converter associated to a specific model, -the library gets the name of the model class, -adds 'Sklearn' as a prefix and retrieves -the associated converter if available.

-
-
Parameters
-

from_sklearn – every supported model is mapped to converter -by a name prefixed with 'Sklearn', the prefix is removed -if this parameter is False but the function only returns converters -whose name is prefixed by 'Sklearn'

-
-
Returns
-

list of supported models as string

-
-
-
- -
-
-skl2onnx.update_registered_converter(model, alias, shape_fct, convert_fct, overwrite=True, parser=None, options=None)[source]#
-

Registers or updates a converter for a new model so that -it can be converted when inserted in a scikit-learn pipeline.

-
-
Parameters
-
    -
  • model – model class

  • -
  • alias – alias used to register the model

  • -
  • shape_fct – function which checks or modifies the expected -outputs, this function should be fast so that the whole graph -can be computed followed by the conversion of each model, -parallelized or not

  • -
  • convert_fct – function which converts a model

  • -
  • overwrite – False to raise exception if a converter -already exists

  • -
  • parser – overwrites the parser as well if not empty

  • -
  • options – registered options for this converter

  • -
-
-
-

The alias is usually the library name followed by the model name. -Example:

-
from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes
-from skl2onnx.operator_converters.RandomForest import convert_sklearn_random_forest_classifier
-from skl2onnx import update_registered_converter
-update_registered_converter(
-        SGDClassifier, 'SklearnLinearClassifier',
-        calculate_linear_classifier_output_shapes,
-        convert_sklearn_random_forest_classifier,
-        options={'zipmap': [True, False, 'columns'],
-                 'output_class_labels': [False, True],
-                 'raw_scores': [True, False]})
-
-
-

The function does not update the parser if not specified except if -option ‘zipmap’ is added to the list. Every classifier -must declare this option to let the default parser -automatically handle that option.

-
- -
-
-skl2onnx.update_registered_parser(model, parser_fct)[source]#
-

Registers or updates a parser for a new model. -A parser returns the expected output of a model.

-
-
Parameters
-
    -
  • model – model class

  • -
  • parser_fct – parser, signature is the same as -parse_sklearn

  • -
-
-
-
- -
-
-

Manipulate ONNX graphs#

-
-
-skl2onnx.helpers.onnx_helper.enumerate_model_node_outputs(model, add_node=False)[source]#
-

Enumerates all the nodes of a model.

-
-
Parameters
-
    -
  • model – ONNX graph

  • -
  • add_node – if False, the function enumerates -all output names from every node, otherwise, it -enumerates tuple (output name, node)

  • -
-
-
Returns
-

enumerator

-
-
-
- -
-
-skl2onnx.helpers.onnx_helper.load_onnx_model(onnx_file_or_bytes)[source]#
-

Loads an ONNX file.

-
-
Parameters
-

onnx_file_or_bytesONNX file or bytes

-
-
Returns
-

ONNX model

-
-
-
- -
-
-skl2onnx.helpers.onnx_helper.select_model_inputs_outputs(model, outputs=None, inputs=None)[source]#
-

Takes a model and changes its outputs.

-
-
Parameters
-
    -
  • modelONNX model

  • -
  • inputs – new inputs

  • -
  • outputs – new outputs

  • -
-
-
Returns
-

modified model

-
-
-

The function removes unneeded files.

-
- -
-
-skl2onnx.helpers.onnx_helper.save_onnx_model(model, filename=None)[source]#
-

Saves a model as a file or bytes.

-
-
Parameters
-
    -
  • modelONNX model

  • -
  • filename – filename or None to return bytes

  • -
-
-
Returns
-

bytes

-
-
-
- -
-
-

Parsers#

-
-
-skl2onnx._parse.parse_sklearn(scope, model, inputs, custom_parsers=None, final_types=None)[source]#
-

This is a delegate function. It does nothing but invokes the -correct parsing function according to the input model’s type.

-
-
Parameters
-
    -
  • scope – Scope object

  • -
  • model – A scikit-learn object (e.g., OneHotEncoder -and LogisticRegression)

  • -
  • inputs – A list of variables

  • -
  • custom_parsers – parsers determines which outputs is expected -for which particular task, default parsers are defined for -classifiers, regressors, pipeline but they can be rewritten, -custom_parsers is a dictionary { type: fct_parser(scope, -model, inputs, custom_parsers=None) }

  • -
  • final_types – a python list. Works the same way as initial_types -but not mandatory, it is used to overwrites the type -(if type is not None) and the name of every output.

  • -
-
-
Returns
-

The output variables produced by the input model

-
-
-
- -
-
-skl2onnx._parse.parse_sklearn_model(model, initial_types=None, target_opset=None, custom_conversion_functions=None, custom_shape_calculators=None, custom_parsers=None, options=None, white_op=None, black_op=None, final_types=None, naming=None)[source]#
-

Puts scikit-learn object into an abstract container so that -our framework can work seamlessly on models created -with different machine learning tools.

-
-
Parameters
-
    -
  • model – A scikit-learn model

  • -
  • initial_types – a python list. Each element is a tuple of a -variable name and a type defined in data_types.py

  • -
  • target_opset – number, for example, 7 for ONNX 1.2, -and 8 for ONNX 1.3.

  • -
  • custom_conversion_functions – a dictionary for specifying -the user customized conversion function if not registered

  • -
  • custom_shape_calculators – a dictionary for specifying the -user customized shape calculator if not registered

  • -
  • custom_parsers – parsers determines which outputs is expected -for which particular task, default parsers are defined for -classifiers, regressors, pipeline but they can be rewritten, -custom_parsers is a dictionary -{ type: fct_parser(scope, model, inputs, custom_parsers=None) }

  • -
  • options – specific options given to converters -(see Converters with options)

  • -
  • white_op – white list of ONNX nodes allowed -while converting a pipeline, if empty, all are allowed

  • -
  • black_op – black list of ONNX nodes allowed -while converting a pipeline, if empty, none are blacklisted

  • -
  • final_types – a python list. Works the same way as initial_types -but not mandatory, it is used to overwrites the type -(if type is not None) and the name of every output.

  • -
  • naming – the user may want to change the way intermediate -are named, this parameter can be a string (a prefix) or a -function, which signature is the following: -get_name(name, existing_names), the library will then -check this name is unique and modify it if not

  • -
-
-
Returns
-

Topology

-
-
-
-

Changed in version 1.10.0: Parameter naming was added.

-
-
- -
-
-

Utils for contributors#

-
-
-skl2onnx.common.utils.check_input_and_output_numbers(operator, input_count_range=None, output_count_range=None)[source]#
-

Check if the number of input(s)/output(s) is correct

-
-
Parameters
-
    -
  • operator – A Operator object

  • -
  • input_count_range – A list of two integers or an integer. If it’s a list the first/second element is the

  • -
-
-
-

minimal/maximal number of inputs. If it’s an integer, it is equivalent to specify that number twice in a list. For -infinite ranges like 5 to infinity, you need to use [5, None]. -:param output_count_range: A list of two integers or an integer. See input_count_range for its format.

-
- -
-
-skl2onnx.common.utils.check_input_and_output_types(operator, good_input_types=None, good_output_types=None)[source]#
-

Check if the type(s) of input(s)/output(s) is(are) correct

-
-
Parameters
-
    -
  • operator – A Operator object

  • -
  • good_input_types – A list of allowed input types (e.g., [FloatTensorType, Int64TensorType]) or None. None

  • -
-
-
-

means that we skip the check of the input types. -:param good_output_types: A list of allowed output types. See good_input_types for its format.

-
- -
-
-

Concepts#

-
-

Containers#

-
-
-class skl2onnx.common._container.SklearnModelContainerNode(sklearn_model, white_op=None, black_op=None, verbose=0)[source]#
-

Main container for one scikit-learn model. -Every converter adds nodes to an existing container -which is converted into a ONNX graph by an instance of -Topology.

-
-
-property input_names#
-

This function should return a list of strings. Each string -corresponds to an input variable name. -:return: a list of string

-
- -
-
-property output_names#
-

This function should return a list of strings. Each string -corresponds to an output variable name. -:return: a list of string

-
- -
- -
-
-class skl2onnx.common._container.ModelComponentContainer(target_opset, options=None, registered_models=None, white_op=None, black_op=None, verbose=0)[source]#
-

In the conversion phase, this class is used to collect all materials -required to build an ONNX GraphProto, which is encapsulated in a -ONNX ModelProto.

-
-
-add_initializer(name, onnx_type, shape, content)[source]#
-

Adds a TensorProto into the initializer list of the final -ONNX model.

-
-
Parameters
-
    -
  • name – Variable name in the produced ONNX model.

  • -
  • onnx_type – Element types allowed in ONNX tensor, e.g., -TensorProto.FLOAT and TensorProto.STRING.

  • -
  • shape – Tensor shape, a list of integers.

  • -
  • content – Flattened tensor values (i.e., a float list -or a float array).

  • -
-
-
Returns
-

created tensor

-
-
-
- -
-
-add_input(variable)[source]#
-

Adds our Variable object defined in _parser.py into the input -list of the final ONNX model.

-
-
Parameters
-

variable – The Variable object to be added

-
-
-
- -
-
-add_node(op_type, inputs, outputs, op_domain='', op_version=None, name=None, **attrs)[source]#
-

Adds a NodeProto into the node list of the final ONNX model. -If the input operator’s domain-version information cannot be -found in our domain-version pool (a Python set), we may add it.

-
-
Parameters
-
    -
  • op_type – A string (e.g., Pool and Conv) indicating the -type of the NodeProto

  • -
  • inputs – A list of strings. They are the input variables’ -names of the considered NodeProto

  • -
  • outputs – A list of strings. They are the output -variables’ names of the considered NodeProto

  • -
  • op_domain – The domain name (e.g., ai.onnx.ml) of the -operator we are trying to add.

  • -
  • op_version – The version number (e.g., 0 and 1) of the -operator we are trying to add.

  • -
  • name – name of the node, this name cannot be empty

  • -
  • attrs – A Python dictionary. Keys and values are -attributes’ names and attributes’ values, -respectively.

  • -
-
-
-
- -
-
-add_output(variable)[source]#
-

Adds our Variable object defined in _parser.py into the -output list of the final ONNX model.

-
-
Parameters
-

variable – The Variable object to be added

-
-
-
- -
- -
-
-

Nodes#

-
-
-class skl2onnx.common._topology.Operator(onnx_name, scope, type, raw_operator, target_opset, scope_inst)[source]#
-

Defines an operator available in ONNX.

-
- -
-
-class skl2onnx.common._topology.Variable(raw_name, onnx_name, scope, type=None)[source]#
-

Defines a variable which holds any data defined -from ONNX types.

-
- -
-
-

Scope#

-
-
-class skl2onnx.common._topology.Scope(name, target_opset=None, custom_shape_calculators=None, options=None, registered_models=None, naming=None)[source]#
-

Every node of an ONNX graph must be unique. This class holds the list -of existing name for every node already defined in graph. It also -provides functions to create a unique unused name.

-
-
-get_unique_operator_name(seed)[source]#
-

Creates a unique operator ID based on the given seed.

-
- -
-
-get_unique_variable_name(seed, rename=True)[source]#
-

Creates a unique variable ID based on the given seed.

-
- -
- -
-
-

Topology#

-
-
-class skl2onnx.common._topology.Topology(model, default_batch_size=1, initial_types=None, target_opset=None, custom_conversion_functions=None, custom_shape_calculators=None, registered_models=None)[source]#
-

Holds instances on Scope and -SklearnModelContainer. -These are filled by the converters while a pipeline is being converted.

-
- -
-
-skl2onnx.common._topology.convert_topology(topology, model_name, doc_string, target_opset, channel_first_inputs=None, options=None, remove_identity=True, verbose=0)[source]#
-

This function is used to convert our Topology object defined in -_parser.py into a ONNX model (type: ModelProto).

-
-
Parameters
-
    -
  • topology – The Topology object we are going to convert

  • -
  • model_name – GraphProto’s name. Let “model” denote the -returned model. The string “model_name” would be -assigned to “model.graph.name.”

  • -
  • doc_string – A string attached to the produced model

  • -
  • target_opset – number or dictionary, -for example, 7 for ONNX 1.2, and 8 for ONNX 1.3, -a dictionary is used to indicate different opset for -different domains

  • -
  • options – see Converters with options

  • -
  • remove_identity – removes identity nodes -include ‘1.1.2’, ‘1.2’, and so on.

  • -
  • verbose – displays information while converting

  • -
-
-
Returns
-

a ONNX ModelProto

-
-
-
- -
-
-
- - -
- - - - - -
- - -
-
- - - -
-
- - - - - -
-
- + + + + + + + + + API Summary - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+ +
+ +
+ +
+
+
+

API Summary#

+

Summary of public functions and classes exposed +in sklearn-onnx.

+
+

Version#

+
+
+skl2onnx.get_latest_tested_opset_version()[source]#
+

This module relies on onnxruntime to test every +converter. The function returns the most recent +target opset tested with onnxruntime or the opset +version specified by the onnx package if this one is lower +(returned by onnx.defs.onnx_opset_version()).

+
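A quick check of the value (a minimal sketch; the printed number depends on the installed onnx and onnxruntime packages):
from skl2onnx import get_latest_tested_opset_version
print(get_latest_tested_opset_version())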
+ +
+
+

Converters#

+

Both functions convert a scikit-learn model into ONNX. +The first one lets the user manually +define the inputs’ names and types. The second one +infers this information from the training data. +These two functions are the main entry points of the converter. +The rest of the API is needed only if a model has no converter +implemented in this package. A new converter then has to be +registered, whether it is imported from another package +or created from scratch.

+
+
+skl2onnx.convert_sklearn(model, name=None, initial_types=None, doc_string='', target_opset=None, custom_conversion_functions=None, custom_shape_calculators=None, custom_parsers=None, options=None, intermediate=False, white_op=None, black_op=None, final_types=None, dtype=None, naming=None, model_optim=True, verbose=0)[source]#
+

This function produces an equivalent +ONNX model of the given scikit-learn model. +The list of supported converters is returned by the function +supported_converters.

+

For pipeline conversion, the user needs to make sure each component +is one of the supported items. +This function converts the specified scikit-learn model +into its ONNX counterpart. +Note that for all conversions, initial types are required. +The ONNX model name can also be specified.

+
+
Parameters:
+
    +
  • model – A scikit-learn model

  • +
  • initial_types – a python list. +Each element is a tuple of a variable name +and a type defined in data_types.py

  • +
  • name – The name of the graph (type: GraphProto) +in the produced ONNX model (type: ModelProto)

  • +
  • doc_string – A string attached onto the produced ONNX model

  • +
  • target_opset – number, for example, 7 for +ONNX 1.2, and 8 for ONNX 1.3, +if value is not specified, the function will +choose the latest tested opset +(see skl2onnx.get_latest_tested_opset_version())

  • +
  • custom_conversion_functions – a dictionary for +specifying the user customized conversion function, +it takes precedence over registered converters

  • +
  • custom_shape_calculators – a dictionary for +specifying the user customized shape calculator +it takes precedence over registered shape calculators.

  • +
  • custom_parsers – parsers determine which outputs +are expected for which particular task, +default parsers are defined for classifiers, +regressors and pipelines but they can be rewritten, +custom_parsers is a dictionary +{ type: fct_parser(scope, model, inputs, custom_parsers=None) }

  • +
  • options – specific options given to converters +(see Converters with options)

  • +
  • intermediate – if True, the function returns the +converted model and the instance of Topology used, +it returns the converted model otherwise

  • +
  • white_op – white list of ONNX nodes allowed +while converting a pipeline, +if empty, all are allowed

  • +
  • black_op – black list of ONNX nodes +allowed while converting a pipeline, +if empty, none are blacklisted

  • +
  • final_types – a Python list. Works the same way as initial_types +but is not mandatory; it is used to overwrite the type +(if the type is not None) and the name of every output.

  • +
  • dtype – removed in version 1.7.5, dtype is +now inferred from input types, +converters may add operators Cast to switch +to double when it is necessary

  • +
  • naming – the user may want to change the way intermediate +results are named, this parameter can be a string (a prefix) or a +function, whose signature is the following: +get_name(name, existing_names), the library will then +check that this name is unique and modify it if not

  • +
  • model_optim – enable or disable model optimisation +after the model was converted into onnx, it reduces the number +of identity nodes

  • +
  • verbose – display progress while converting a model

  • +
+
+
Returns:
+

An ONNX model (type: ModelProto) which is +equivalent to the input scikit-learn model

+
+
+

Example of initial_types: +Assume that the specified scikit-learn model takes +a heterogeneous list as its input. +If the first 5 elements are floats and the last 10 elements are integers, +we need to specify initial types as below. The [None] in +[None, 5] indicates the batch size here is unknown.

+
from skl2onnx.common.data_types import FloatTensorType, Int64TensorType
+initial_type = [('float_input', FloatTensorType([None, 5])),
+                ('int64_input', Int64TensorType([None, 10]))]
+
+
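The call itself then looks as follows (a sketch reusing the initial_type list above; clf stands for an illustrative fitted estimator and the file name is only an example):
from skl2onnx import convert_sklearn

onx = convert_sklearn(clf, name="my_model", initial_types=initial_type)
with open("my_model.onnx", "wb") as f:
    f.write(onx.SerializeToString())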
+
+

Note

+

If a pipeline includes an instance of +ColumnTransformer, +scikit-learn allows the user to specify columns by name. +This option is not supported +by sklearn-onnx as feature names could differ between +the input data and the ONNX graph +(defined by parameter initial_types); only integer indices are supported.

+
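In practice this means a pipeline intended for conversion should select columns by position rather than by name, as in this sketch (the column indices are purely illustrative):
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

preprocess = ColumnTransformer([
    ("num", StandardScaler(), [0, 1, 2]),   # integer indices instead of column names
    ("cat", OneHotEncoder(), [3, 4]),
])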
+
+

Converters options#

+

Some ONNX operators expose parameters sklearn-onnx cannot +guess from the raw model. Some default values are usually suggested, +but the user may have to overwrite them manually. This is +not obvious to do when a model is included in a pipeline. +That’s why these options can be given to the function convert_sklearn +as a dictionary {model_type: parameters in a dictionary} or +{model_id: parameters in a dictionary}. +The separators option is used to specify the delimiters between two words +when the ONNX graph needs to tokenize a string. +The default list is short and may not include all +the necessary values. It can be overwritten as:

+
extra = {TfidfVectorizer: {"separators": [' ', '[.]', '\\?',
+            ',', ';', ':', '\\!', '\\(', '\\)']}}
+model_onnx = convert_sklearn(
+    model, "tfidf",
+    initial_types=[("input", StringTensorType([None, 1]))],
+    options=extra)
+
+
+

But if a pipeline contains two models of the same class, +it is possible to distinguish between the two with the function id:

+
extra = {id(model): {"separators": [' ', '.', '\\?', ',', ';',
+            ':', '\\!', '\\(', '\\)']}}
+model_onnx = convert_sklearn(
+    pipeline, "pipeline-with-2-tfidf",
+    initial_types=[("input", StringTensorType([None, 1]))],
+    options=extra)
+
+
+

It is used in example TfIdfVectorizer with ONNX.

+
+

Changed in version 1.10.0: Parameter naming was added.

+
+
+
+ +
+
+skl2onnx.to_onnx(model, X=None, name=None, initial_types=None, target_opset=None, options=None, white_op=None, black_op=None, final_types=None, dtype=None, naming=None, model_optim=True, verbose=0)[source]#
+

Calls convert_sklearn() with simplified parameters.

+
+
Parameters:
+
    +
  • model – model to convert

  • +
  • X – training set, can be None, it is used to infer the +input types (initial_types)

  • +
  • initial_types – if X is None, then initial_types must be +defined

  • +
  • target_opset – conversion with a specific target opset

  • +
  • options – specific options given to converters +(see Converters with options)

  • +
  • name – name of the model

  • +
  • white_op – white list of ONNX nodes allowed +while converting a pipeline, if empty, all are allowed

  • +
  • black_op – black list of ONNX nodes allowed +while converting a pipeline, if empty, none are blacklisted

  • +
  • final_types – a Python list. Works the same way as initial_types +but is not mandatory; it is used to overwrite the type +(if the type is not None) and the name of every output.

  • +
  • dtype – removed in version 1.7.5, dtype is now inferred from +input types, converters may add operators Cast to switch to +double when it is necessary

  • +
  • naming – the user may want to change the way intermediate +results are named, this parameter can be a string (a prefix) or a +function, whose signature is the following: +get_name(name, existing_names), the library will then +check that this name is unique and modify it if not

  • +
  • model_optim – enable or disable model optimisation +after the model was converted into onnx, it reduces the number +of identity nodes

  • +
  • verbose – display progress while converting a model

  • +
+
+
Returns:
+

converted model

+
+
+

This function checks whether the model inherits from class +OnnxOperatorMixin; in that case it calls its method to_onnx, +otherwise it calls convert_sklearn().

+
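A minimal end-to-end sketch (the dataset, estimator and file name are only illustrative; X is used to infer the input types):
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from skl2onnx import to_onnx

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=500).fit(X, y)
# Passing a slice of the training data plays the role of initial_types.
onx = to_onnx(clf, X[:1].astype(np.float32))
with open("logreg_iris.onnx", "wb") as f:
    f.write(onx.SerializeToString())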
+

Changed in version 1.10.0: Parameter naming was added.

+
+
+ +
+
+

Logging#

+

The conversion of a pipeline fails if it contains an object without any +associated converter. It may also fail if one of the objects is handled +by a custom converter. If the error message is not explicit enough, +it is possible to enable logging:

+
import logging
+logger = logging.getLogger('skl2onnx')
+logger.setLevel(logging.DEBUG)
+logging.basicConfig(level=logging.DEBUG)
+
+
+

Example Logging, verbose illustrates what it looks like.

+
+
+

Register a new converter#

+

If a model has no converter +implemented in this package, a new converter has then to be +registered, whether it is imported from another package +or created from scratch. Section Covered Converters +lists all available converters.

+
+
+skl2onnx.supported_converters(from_sklearn=False)[source]#
+

Returns the list of supported converters. +To find the converter associated with a specific model, +the library gets the name of the model class, +adds 'Sklearn' as a prefix and retrieves +the associated converter if available.

+
+
Parameters:
+

from_sklearn – every supported model is mapped to a converter +by a name prefixed with 'Sklearn'; the prefix is removed +if this parameter is False, but the function only returns converters +whose name is prefixed by 'Sklearn'

+
+
Returns:
+

list of supported models as string

+
+
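For instance, a quick way to check whether a converter is registered for a given model class (a sketch; the exact prefix handling depends on the from_sklearn parameter described above):
from skl2onnx import supported_converters

names = supported_converters()
print(len(names), "registered converters")
print("SklearnLogisticRegression" in names)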
+
+ +
+
+skl2onnx.update_registered_converter(model, alias, shape_fct, convert_fct, overwrite=True, parser=None, options=None)[source]#
+

Registers or updates a converter for a new model so that +it can be converted when inserted in a scikit-learn pipeline.

+
+
Parameters:
+
    +
  • model – model class

  • +
  • alias – alias used to register the model

  • +
  • shape_fct – function which checks or modifies the expected +outputs, this function should be fast so that the whole graph +can be computed followed by the conversion of each model, +parallelized or not

  • +
  • convert_fct – function which converts a model

  • +
  • overwrite – False to raise exception if a converter +already exists

  • +
  • parser – overwrites the parser as well if not empty

  • +
  • options – registered options for this converter

  • +
+
+
+

The alias is usually the library name followed by the model name. +Example:

+
from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes
+from skl2onnx.operator_converters.RandomForest import convert_sklearn_random_forest_classifier
+from skl2onnx import update_registered_converter
+update_registered_converter(
+        SGDClassifier, 'SklearnLinearClassifier',
+        calculate_linear_classifier_output_shapes,
+        convert_sklearn_random_forest_classifier,
+        options={'zipmap': [True, False, 'columns'],
+                 'output_class_labels': [False, True],
+                 'raw_scores': [True, False]})
+
+
+

The function does not update the parser unless one is specified, except when +the option ‘zipmap’ is added to the list. Every classifier +must declare this option so that the default parser +automatically handles it.

+
+ +
+
+skl2onnx.update_registered_parser(model, parser_fct)[source]#
+

Registers or updates a parser for a new model. +A parser returns the expected output of a model.

+
+
Parameters:
+
    +
  • model – model class

  • +
  • parser_fct – parser, signature is the same as +parse_sklearn

  • +
+
+
+
+ +
+
+

Manipulate ONNX graphs#

+
+
+skl2onnx.helpers.onnx_helper.enumerate_model_node_outputs(model, add_node=False)[source]#
+

Enumerates all the nodes of a model.

+
+
Parameters:
+
    +
  • model – ONNX graph

  • +
  • add_node – if False, the function enumerates +all output names from every node, otherwise, it +enumerates tuple (output name, node)

  • +
+
+
Returns:
+

enumerator

+
+
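For example, listing every intermediate output name of a converted pipeline (a sketch; 'pipeline.onnx' is a placeholder file name):
from skl2onnx.helpers.onnx_helper import load_onnx_model, enumerate_model_node_outputs

model = load_onnx_model("pipeline.onnx")
for name in enumerate_model_node_outputs(model):
    print(name)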
+
+ +
+
+skl2onnx.helpers.onnx_helper.load_onnx_model(onnx_file_or_bytes)[source]#
+

Loads an ONNX file.

+
+
Parameters:
+

onnx_file_or_bytesONNX file or bytes

+
+
Returns:
+

ONNX model

+
+
+
+ +
+
+skl2onnx.helpers.onnx_helper.select_model_inputs_outputs(model, outputs=None, inputs=None)[source]#
+

Takes a model and changes its outputs.

+
+
Parameters:
+
    +
  • modelONNX model

  • +
  • inputs – new inputs

  • +
  • outputs – new outputs

  • +
+
+
Returns:
+

modified model

+
+
+

The function removes the nodes that are no longer needed.

+
+ +
+
+skl2onnx.helpers.onnx_helper.save_onnx_model(model, filename=None)[source]#
+

Saves a model as a file or bytes.

+
+
Parameters:
+
    +
  • modelONNX model

  • +
  • filename – filename or None to return bytes

  • +
+
+
Returns:
+

bytes

+
+
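Put together, these helpers can extract the sub-graph that ends at an intermediate output (a sketch; 'variable_name' stands for one of the names enumerated above and the file names are illustrative):
from skl2onnx.helpers.onnx_helper import (
    load_onnx_model, select_model_inputs_outputs, save_onnx_model)

model = load_onnx_model("pipeline.onnx")
truncated = select_model_inputs_outputs(model, outputs=["variable_name"])
save_onnx_model(truncated, "pipeline_truncated.onnx")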
+
+ +
+
+

Parsers#

+
+
+skl2onnx._parse.parse_sklearn(scope, model, inputs, custom_parsers=None, final_types=None)[source]#
+

This is a delegate function. It does nothing but invokes the +correct parsing function according to the input model’s type.

+
+
Parameters:
+
    +
  • scope – Scope object

  • +
  • model – A scikit-learn object (e.g., OneHotEncoder +and LogisticRegression)

  • +
  • inputs – A list of variables

  • +
  • custom_parsers – parsers determine which outputs are expected +for which particular task, default parsers are defined for +classifiers, regressors and pipelines but they can be rewritten, +custom_parsers is a dictionary { type: fct_parser(scope, +model, inputs, custom_parsers=None) }

  • +
  • final_types – a python list. Works the same way as initial_types +but not mandatory, it is used to overwrites the type +(if type is not None) and the name of every output.

  • +
+
+
Returns:
+

The output variables produced by the input model

+
+
+
+ +
+
+skl2onnx._parse.parse_sklearn_model(model, initial_types=None, target_opset=None, custom_conversion_functions=None, custom_shape_calculators=None, custom_parsers=None, options=None, white_op=None, black_op=None, final_types=None, naming=None)[source]#
+

Puts a scikit-learn object into an abstract container so that +our framework can work seamlessly on models created +with different machine learning tools.

+
+
Parameters:
+
    +
  • model – A scikit-learn model

  • +
  • initial_types – a python list. Each element is a tuple of a +variable name and a type defined in data_types.py

  • +
  • target_opset – number, for example, 7 for ONNX 1.2, +and 8 for ONNX 1.3.

  • +
  • custom_conversion_functions – a dictionary for specifying +the user customized conversion function if not registered

  • +
  • custom_shape_calculators – a dictionary for specifying the +user customized shape calculator if not registered

  • +
  • custom_parsers – parsers determine which outputs are expected +for which particular task, default parsers are defined for +classifiers, regressors and pipelines but they can be rewritten, +custom_parsers is a dictionary +{ type: fct_parser(scope, model, inputs, custom_parsers=None) }

  • +
  • options – specific options given to converters +(see Converters with options)

  • +
  • white_op – white list of ONNX nodes allowed +while converting a pipeline, if empty, all are allowed

  • +
  • black_op – black list of ONNX nodes allowed +while converting a pipeline, if empty, none are blacklisted

  • +
  • final_types – a python list. Works the same way as initial_types +but not mandatory, it is used to overwrites the type +(if type is not None) and the name of every output.

  • +
  • naming – the user may want to change the way intermediate +are named, this parameter can be a string (a prefix) or a +function, which signature is the following: +get_name(name, existing_names), the library will then +check this name is unique and modify it if not

  • +
+
+
Returns:
+

Topology

+
+
+
+

Changed in version 1.10.0: Parameter naming was added.

+
+
+ +
+
+

Utils for contributors#

+
+
+skl2onnx.common.utils.check_input_and_output_numbers(operator, input_count_range=None, output_count_range=None)[source]#
+

Check if the number of input(s)/output(s) is correct

+
+
Parameters:
+
    +
  • operator – An Operator object

  • +
  • input_count_range – A list of two integers or an integer. If it’s a list the first/second element is the

  • +
+
+
+

minimal/maximal number of inputs. If it’s an integer, it is equivalent to specifying that number twice in a list. For +infinite ranges like 5 to infinity, use [5, None]. +:param output_count_range: A list of two integers or an integer. See input_count_range for its format.

+
+ +
+
+skl2onnx.common.utils.check_input_and_output_types(operator, good_input_types=None, good_output_types=None)[source]#
+

Check if the type(s) of input(s)/output(s) is(are) correct

+
+
Parameters:
+
    +
  • operator – An Operator object

  • +
  • good_input_types – A list of allowed input types (e.g., [FloatTensorType, Int64TensorType]) or None. None

  • +
+
+
+

means that we skip the check of the input types. +:param good_output_types: A list of allowed output types. See good_input_types for its format.

+
+ +
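These checks usually appear at the beginning of a custom shape calculator, as in this sketch (the single float input/output assumption and the .type.shape access are only illustrative):
from skl2onnx.common.data_types import FloatTensorType
from skl2onnx.common.utils import (
    check_input_and_output_numbers, check_input_and_output_types)

def my_shape_calculator(operator):
    # Exactly one input and one output, and the input must be a float tensor.
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
    check_input_and_output_types(operator, good_input_types=[FloatTensorType])
    # Propagate the batch dimension to the declared output.
    n = operator.inputs[0].type.shape[0]
    operator.outputs[0].type = FloatTensorType([n, 1])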
+
+

Concepts#

+
+

Containers#

+
+
+class skl2onnx.common._container.SklearnModelContainerNode(sklearn_model, white_op=None, black_op=None, verbose=0)[source]#
+

Main container for one scikit-learn model. +Every converter adds nodes to an existing container +which is converted into an ONNX graph by an instance of +Topology.

+
+
+property input_names#
+

This function should return a list of strings. Each string +corresponds to an input variable name. +:return: a list of string

+
+ +
+
+property output_names#
+

This function should return a list of strings. Each string +corresponds to an output variable name. +:return: a list of string

+
+ +
+ +
+
+class skl2onnx.common._container.ModelComponentContainer(target_opset, options=None, registered_models=None, white_op=None, black_op=None, verbose=0)[source]#
+

In the conversion phase, this class is used to collect all materials +required to build an ONNX GraphProto, which is encapsulated in an +ONNX ModelProto.

+
+
+add_initializer(name, onnx_type, shape, content)[source]#
+

Adds a TensorProto into the initializer list of the final +ONNX model.

+
+
Parameters:
+
    +
  • name – Variable name in the produced ONNX model.

  • +
  • onnx_type – Element types allowed in ONNX tensor, e.g., +TensorProto.FLOAT and TensorProto.STRING.

  • +
  • shape – Tensor shape, a list of integers.

  • +
  • content – Flattened tensor values (i.e., a float list +or a float array).

  • +
+
+
Returns:
+

created tensor

+
+
+
+ +
+
+add_input(variable)[source]#
+

Adds our Variable object defined in _parser.py into the input +list of the final ONNX model.

+
+
Parameters:
+

variable – The Variable object to be added

+
+
+
+ +
+
+add_node(op_type, inputs, outputs, op_domain='', op_version=None, name=None, **attrs)[source]#
+

Adds a NodeProto into the node list of the final ONNX model. +If the input operator’s domain-version information cannot be +found in our domain-version pool (a Python set), we may add it.

+
+
Parameters:
+
    +
  • op_type – A string (e.g., Pool and Conv) indicating the +type of the NodeProto

  • +
  • inputs – A list of strings. They are the input variables’ +names of the considered NodeProto

  • +
  • outputs – A list of strings. They are the output +variables’ names of the considered NodeProto

  • +
  • op_domain – The domain name (e.g., ai.onnx.ml) of the +operator we are trying to add.

  • +
  • op_version – The version number (e.g., 0 and 1) of the +operator we are trying to add.

  • +
  • name – name of the node, this name cannot be empty

  • +
  • attrs – A Python dictionary. Keys and values are +attributes’ names and attributes’ values, +respectively.

  • +
+
+
+
+ +
+
+add_output(variable)[source]#
+

Adds our Variable object defined in _parser.py into the +output list of the final ONNX model.

+
+
Parameters:
+

variable – The Variable object to be added

+
+
+
+ +
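A typical converter body drives the container like this (a sketch with illustrative names; scope, operator and container are supplied by the framework, and full_name is assumed to be the Variable's ONNX name):
import numpy as np
from onnx import TensorProto

def convert_my_scaler(scope, operator, container):
    # Illustrative constant; a real converter would read it from operator.raw_operator.
    coef = np.array([2.0], dtype=np.float32)
    coef_name = scope.get_unique_variable_name("coef")
    # The constant becomes an initializer of the ONNX graph.
    container.add_initializer(coef_name, TensorProto.FLOAT, coef.shape, coef.flatten())
    # Multiply the declared input by the constant to produce the declared output.
    container.add_node(
        "Mul", [operator.inputs[0].full_name, coef_name],
        operator.outputs[0].full_name,
        name=scope.get_unique_operator_name("Mul"))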
+ +
+
+

Nodes#

+
+
+class skl2onnx.common._topology.Operator(onnx_name, scope, type, raw_operator, target_opset, scope_inst)[source]#
+

Defines an operator available in ONNX.

+
+ +
+
+class skl2onnx.common._topology.Variable(raw_name, onnx_name, scope, type=None)[source]#
+

Defines a variable which holds any data defined +from ONNX types.

+
+ +
+
+

Scope#

+
+
+class skl2onnx.common._topology.Scope(name, target_opset=None, custom_shape_calculators=None, options=None, registered_models=None, naming=None)[source]#
+

Every node of an ONNX graph must have a unique name. This class holds the list +of existing names for every node already defined in the graph. It also +provides functions to create a unique, unused name.

+
+
+get_unique_operator_name(seed)[source]#
+

Creates a unique operator ID based on the given seed.

+
+ +
+
+get_unique_variable_name(seed, rename=True)[source]#
+

Creates a unique variable ID based on the given seed.

+
+ +
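Inside a converter these helpers guarantee name uniqueness, as in this sketch (scope is provided by the framework; the seed is illustrative):
def convert_example(scope, operator, container):
    # Two requests with the same seed yield two distinct names,
    # typically by appending a numeric suffix.
    name_a = scope.get_unique_variable_name("probabilities")
    name_b = scope.get_unique_variable_name("probabilities")
    assert name_a != name_b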
+ +
+
+

Topology#

class skl2onnx.common._topology.Topology(model, default_batch_size=1, initial_types=None, target_opset=None, custom_conversion_functions=None, custom_shape_calculators=None, registered_models=None)[source]#

Holds instances of Scope and SklearnModelContainer. These are filled by the converters while a pipeline is being converted.

skl2onnx.common._topology.convert_topology(topology, model_name, doc_string, target_opset, channel_first_inputs=None, options=None, remove_identity=True, verbose=0)[source]#

This function is used to convert our Topology object defined in _parser.py into an ONNX model (type: ModelProto).

Parameters:

  • topology – The Topology object we are going to convert.
  • model_name – GraphProto’s name. Let “model” denote the returned model; the string “model_name” would be assigned to “model.graph.name”.
  • doc_string – A string attached to the produced model.
  • target_opset – Number or dictionary; for example, 7 for ONNX 1.2 and 8 for ONNX 1.3. A dictionary is used to indicate different opsets for different domains.
  • options – See Converters with options.
  • remove_identity – Removes identity nodes.
  • verbose – Displays information while converting.

Returns:

  an ONNX ModelProto
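convert_topology is rarely called directly; the public entry points parse the scikit-learn model into a Topology, run the shape calculators and converters, and invoke it internally. A minimal sketch of the usual path (public API only, illustrative):

import numpy as np
from sklearn.cluster import KMeans
from skl2onnx import to_onnx

X = np.arange(20).reshape(10, 2).astype(np.float32)
model = KMeans(n_clusters=2).fit(X)
# to_onnx builds the Topology and eventually calls convert_topology.
onx = to_onnx(model, X, target_opset=12)
print(type(onx))  # an onnx ModelProto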

diff --git a/auto_examples/index.html b/auto_examples/index.html
index f94ecdfee..4aa744c1b 100644
(page rebuilt with the new theme: title now "Gallery of examples - sklearn-onnx 1.14.0 documentation"; head and navigation markup changes omitted)
diff --git a/auto_examples/plot_backend.html b/auto_examples/plot_backend.html
index 0c4e45543..e233a465c 100644
(page rebuilt with the new theme: title now "ONNX Runtime Backend for ONNX - sklearn-onnx 1.14.0 documentation"; head and navigation markup changes omitted)

ONNX Runtime Backend for ONNX#

ONNX Runtime extends the onnx backend API to run predictions using this runtime. @@ -377,88 +298,81 @@ from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression import numpy -from onnxruntime import get_device +from onnxruntime import get_device import numpy as np import onnxruntime.backend as backend

Let’s create an ONNX graph first.

data = load_iris()
-X, Y = data.data, data.target
-logreg = LogisticRegression(C=1e5).fit(X, Y)
-model = skl2onnx.to_onnx(logreg, X.astype(np.float32))
-name = "logreg_iris.onnx"
-with open(name, "wb") as f:
-    f.write(model.SerializeToString())
+X, Y = data.data, data.target
+logreg = LogisticRegression(C=1e5).fit(X, Y)
+model = skl2onnx.to_onnx(logreg, X.astype(np.float32))
+name = "logreg_iris.onnx"
+with open(name, "wb") as f:
+    f.write(model.SerializeToString())
 

Let’s use the ONNX backend API to test it.

-
model = onnx.load(name)
-rep = backend.prepare(model, 'CPU')
-x = np.array([[-1.0, -2.0, 5.0, 6.0],
+
model = onnx.load(name)
+rep = backend.prepare(model, 'CPU')
+x = np.array([[-1.0, -2.0, 5.0, 6.0],
               [-1.0, -2.0, -3.0, -4.0],
               [-1.0, -2.0, 7.0, 8.0]],
-             dtype=np.float32)
-label, proba = rep.run(x)
-print("label={}".format(label))
-print("probabilities={}".format(proba))
+             dtype=np.float32)
+label, proba = rep.run(x)
+print("label={}".format(label))
+print("probabilities={}".format(proba))
 
-

Out:

label=[2 1 2]
-probabilities=[{0: 0.0, 1: 0.0, 2: 1.0}, {0: 0.04737764596939087, 1: 0.9526224136352539, 2: 0.0}, {0: 0.0, 1: 0.0, 2: 1.0}]
+probabilities=[{0: 0.0, 1: 0.0, 2: 1.0}, {0: 0.04301583021879196, 1: 0.9569841623306274, 2: 0.0}, {0: 0.0, 1: 0.0, 2: 1.0}]
 

The device depends on how the package was compiled, GPU or CPU.

-
print(get_device())
+
print(get_device())
 
-

Out:

CPU
 

The backend can also directly load the model without using onnx.

-
rep = backend.prepare(name, 'CPU')
-x = np.array([[-1.0, -2.0, -3.0, -4.0],
+
rep = backend.prepare(name, 'CPU')
+x = np.array([[-1.0, -2.0, -3.0, -4.0],
               [-1.0, -2.0, -3.0, -4.0],
               [-1.0, -2.0, -3.0, -4.0]],
-             dtype=np.float32)
-label, proba = rep.run(x)
-print("label={}".format(label))
-print("probabilities={}".format(proba))
+             dtype=np.float32)
+label, proba = rep.run(x)
+print("label={}".format(label))
+print("probabilities={}".format(proba))
 
-

Out:

label=[1 1 1]
-probabilities=[{0: 0.04737764596939087, 1: 0.9526224136352539, 2: 0.0}, {0: 0.04737764596939087, 1: 0.9526224136352539, 2: 0.0}, {0: 0.04737764596939087, 1: 0.9526224136352539, 2: 0.0}]
+probabilities=[{0: 0.04301583021879196, 1: 0.9569841623306274, 2: 0.0}, {0: 0.04301583021879196, 1: 0.9569841623306274, 2: 0.0}, {0: 0.04301583021879196, 1: 0.9569841623306274, 2: 0.0}]
 

The backend API is implemented by other frameworks and makes it easier to switch between multiple runtimes with the same API.
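Since the API is shared across backends, probing and running a model follow the same pattern everywhere; a small illustrative check (assuming a CPU-only build and the logreg_iris.onnx file created above):

import numpy as np
import onnxruntime.backend as backend

# supports_device tells whether this build can run on a given device
# before calling prepare().
if backend.supports_device("CPU"):
    rep = backend.prepare("logreg_iris.onnx", "CPU")
    x = np.array([[-1.0, -2.0, 5.0, 6.0]], dtype=np.float32)
    print(rep.run(x)[0])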

Versions used for this example

-
print("numpy:", numpy.__version__)
-print("scikit-learn:", sklearn.__version__)
-print("onnx: ", onnx.__version__)
-print("onnxruntime: ", onnxruntime.__version__)
-print("skl2onnx: ", skl2onnx.__version__)
+
print("numpy:", numpy.__version__)
+print("scikit-learn:", sklearn.__version__)
+print("onnx: ", onnx.__version__)
+print("onnxruntime: ", onnxruntime.__version__)
+print("skl2onnx: ", skl2onnx.__version__)
 
-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 0.419 seconds)

-

Gallery generated by Sphinx-Gallery

-
- + + +
+
- - -
-
- - -
-
- - - -
- +
- - +
+ + + + \ No newline at end of file diff --git a/auto_examples/plot_benchmark_cdist.html b/auto_examples/plot_benchmark_cdist.html index c577ce723..5ee6c58c2 100644 --- a/auto_examples/plot_benchmark_cdist.html +++ b/auto_examples/plot_benchmark_cdist.html @@ -1,403 +1,296 @@ - - - - - - - - Compare CDist with scipy — sklearn-onnx 1.11 documentation - - - - - - - - - - - - - - - - + + + + + + + + + Compare CDist with scipy - sklearn-onnx 1.14.0 documentation + + + - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -
-
- - - - -
- - -
- -
- On this page +
- -
- -
- -
- - -
- - - - - +
+ + + +
+
+
+ + + + + Back to top + +
-
- -
- - +
+ -
-

Compare CDist with scipy#

The following example focuses on one particular operator, CDist, and compares its execution time between onnxruntime and scipy.

- -
-

ONNX Graph with CDist#

The cdist function computes pairwise distances.

from pprint import pprint
@@ -419,7 +312,6 @@ 

ONNX Graph with CDistprint(cdist(X, Y, metric='euclidean'))

-

Out:

[[2. 2. 2.]
  [2. 2. 2.]]
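The construction of the ONNX graph holding the CDist node is elided from this diff; one way to hand-build an equivalent graph with onnx.helper (assuming onnxruntime's contrib CDist operator in the com.microsoft domain with a metric attribute) is sketched below:

from onnx import helper, TensorProto

X_info = helper.make_tensor_value_info('X', TensorProto.FLOAT, [None, 4])
Y_info = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [None, 4])
Z_info = helper.make_tensor_value_info('Z', TensorProto.FLOAT, [None, None])
# A single CDist node computing the pairwise distance matrix.
node = helper.make_node('CDist', ['X', 'Y'], ['Z'],
                        domain='com.microsoft', metric='euclidean')
graph = helper.make_graph([node], 'cdist_graph', [X_info, Y_info], [Z_info])
onx = helper.make_model(graph, opset_imports=[
    helper.make_opsetid('', 17), helper.make_opsetid('com.microsoft', 1)])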
 
@@ -432,10 +324,9 @@

ONNX Graph with CDistprint(onx)

-

Out:

+
+

CDist and onnxruntime#

We compute the output of CDist operator with onnxruntime.

-
sess = InferenceSession(onx.SerializeToString())
+
sess = InferenceSession(onx.SerializeToString(),
+                        providers=["CPUExecutionProvider"])
 res = sess.run(None, {'X': X, 'Y': Y})
 print(res)
 
-

Out:

[array([[1.9999999, 1.9999999, 1.9999999],
        [1.9999999, 2.       , 2.       ]], dtype=float32)]
 
-
-
+
+

Benchmark#

Let’s compare onnxruntime and scipy.

def measure_time(name, stmt, context, repeat=100, number=20):
     tim = Timer(stmt, globals=context)
@@ -539,11 +430,10 @@ 

Benchmarkpprint(time_scipy)

-

Out:

-
{'average': 8.223000000002757e-06,
- 'deviation': 2.9731616168713808e-06,
- 'max_exec': 2.310500000000104e-05,
- 'min_exec': 5.42499999998114e-06,
+
{'average': 6.455253499780156e-06,
+ 'deviation': 8.708922725775654e-07,
+ 'max_exec': 1.072999999678359e-05,
+ 'min_exec': 5.390000001170847e-06,
  'name': 'scipy',
  'ncols': 4,
  'nrows': 2,
@@ -558,11 +448,10 @@ 

Benchmarkpprint(time_ort)

-

Out:

-
{'average': 1.5459150000008747e-05,
- 'deviation': 2.270178853640837e-06,
- 'max_exec': 2.3989999999862732e-05,
- 'min_exec': 1.2139999999938311e-05,
+
{'average': 1.2687857000173608e-05,
+ 'deviation': 1.6693917723816003e-06,
+ 'max_exec': 1.7610000008971836e-05,
+ 'min_exec': 1.0040000006483751e-05,
  'name': 'ort',
  'ncols': 4,
  'nrows': 2,
@@ -595,16 +484,14 @@ 

Benchmarkdf.plot(x='N', y=['scipy/ort'])

-plot benchmark cdist

Out:

-
  0%|          | 0/4 [00:00<?, ?it/s]
- 50%|#####     | 2/4 [00:00<00:00, 14.49it/s]
-100%|##########| 4/4 [00:03<00:00,  1.07it/s]
-100%|##########| 4/4 [00:03<00:00,  1.24it/s]
+plot benchmark cdist
  0%|          | 0/4 [00:00<?, ?it/s]
+ 75%|#######5  | 3/4 [00:00<00:00, 11.93it/s]
+100%|##########| 4/4 [00:01<00:00,  2.19it/s]
        N     scipy       ort  scipy/ort
-0     10  0.000006  0.000018   0.353662
-1    100  0.000022  0.000021   1.040904
-2   1000  0.000185  0.000085   2.181040
-3  10000  0.000965  0.000302   3.198600
+0     10  0.000004  0.000009   0.467956
+1    100  0.000009  0.000012   0.780008
+2   1000  0.000065  0.000024   2.775544
+3  10000  0.000591  0.000193   3.055958
 

Versions used for this example

@@ -614,18 +501,14 @@

Benchmarkprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 3.862 seconds)

- - +
+ +
+
+
- - -
- - - +
- - -
-
- - - - - + + +
-
- +
+ + + + \ No newline at end of file diff --git a/auto_examples/plot_benchmark_pipeline.html b/auto_examples/plot_benchmark_pipeline.html index 2500b11e5..554f33add 100644 --- a/auto_examples/plot_benchmark_pipeline.html +++ b/auto_examples/plot_benchmark_pipeline.html @@ -1,414 +1,295 @@ - - - - - - - - Benchmark a pipeline — sklearn-onnx 1.11 documentation - - - - - - - - - - - - - - - - + + + + + + + + + Benchmark a pipeline - sklearn-onnx 1.14.0 documentation + + + - - - - - - - - - - - - - - - - - - + + - - - -
-
- - - -
-
- - - - - - -
- -
- - -
- - - - - +
+ + + +
+
+
+ + + + + Back to top + +
-
- -
- - +
+ -
-

Benchmark a pipeline#

The following example checks up on every step in a pipeline, compares and benchmarks the predictions.

- -
-

Create a pipeline#

We reuse the pipeline implemented in example Pipelining: chaining a PCA and a logistic regression. There is one change because @@ -444,8 +325,7 @@

Create a pipelinepipe.fit(X_digits, y_digits)

-

Out:

-
C:\xadupre\github\scikit-learn\sklearn\linear_model\_logistic.py:444: ConvergenceWarning: lbfgs failed to converge (status=1):
+
/home/xadupre/github/scikit-learn/sklearn/linear_model/_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):
 STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
 
 Increase the number of iterations (max_iter) or scale the data as shown in:
@@ -453,18 +333,21 @@ 

Create a pipeline +
Pipeline(steps=[('pca', PCA()), ('logistic', LogisticRegression())])
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.

-
-

Conversion to ONNX

+
+
+
+

Conversion to ONNX#

initial_types = [('input', FloatTensorType((None, X_digits.shape[1])))]
 model_onnx = convert_sklearn(pipe, initial_types=initial_types,
                              target_opset=12)
 
-sess = rt.InferenceSession(model_onnx.SerializeToString())
+sess = rt.InferenceSession(model_onnx.SerializeToString(),
+                           providers=["CPUExecutionProvider"])
 print("skl predict_proba")
 print(pipe.predict_proba(X_digits[:2]))
 onx_pred = sess.run(None, {'input': X_digits[:2].astype(np.float32)})[1]
@@ -473,33 +356,32 @@ 

Conversion to ONNXprint(df.values)

-

Out:

skl predict_proba
-[[9.99998536e-01 5.99063532e-19 3.48549155e-10 1.55765807e-08
-  3.32559909e-10 1.21314723e-06 3.98960115e-08 1.22513893e-07
-  2.23871287e-08 4.98148626e-08]
- [1.47648497e-14 9.99999301e-01 1.05811969e-10 7.49298734e-13
-  2.48627456e-07 8.75686305e-12 5.39025172e-11 2.95899962e-11
-  4.50528996e-07 1.30607533e-13]]
+[[9.99998536e-01 5.99063158e-19 3.48548953e-10 1.55765726e-08
+  3.32559745e-10 1.21314653e-06 3.98959930e-08 1.22513839e-07
+  2.23871272e-08 4.98148509e-08]
+ [1.47648437e-14 9.99999301e-01 1.05811967e-10 7.49298733e-13
+  2.48627417e-07 8.75686484e-12 5.39025135e-11 2.95899938e-11
+  4.50528833e-07 1.30607478e-13]]
 onnx predict_proba
 [[9.99998569e-01 5.99062501e-19 3.48550355e-10 1.55766493e-08
-  3.32561811e-10 1.21315361e-06 3.98961184e-08 1.22514706e-07
+  3.32561811e-10 1.21315134e-06 3.98961930e-08 1.22514706e-07
   2.23872494e-08 4.98151529e-08]
  [1.47648956e-14 9.99999285e-01 1.05811991e-10 7.49297488e-13
-  2.48627885e-07 8.75685548e-12 5.39024415e-11 2.95900075e-11
-  4.50528631e-07 1.30607344e-13]]
+  2.48627885e-07 8.75685548e-12 5.39024415e-11 2.95899520e-11
+  4.50529058e-07 1.30607344e-13]]
 
-
-
-

Comparing outputs

+ +
+

Comparing outputs#

compare_objects(pipe.predict_proba(X_digits[:2]), onx_pred)
 # No exception so they are the same.
 
-
-
-

Benchmarks

+ +
+

Benchmarks#

print("scikit-learn")
 print(timeit("pipe.predict_proba(X_digits[:1])",
              number=10000, globals=globals()))
@@ -508,16 +390,15 @@ 

Benchmarksnumber=10000, globals=globals()))

-

Out:

scikit-learn
-1.3873941999999957
+2.355312850000246
 onnxruntime
-0.30984360000000066
+0.29348953099997743
 
-
-
-

Intermediate steps

+ +
+

Intermediate steps#

Let’s imagine the final output is wrong and we need to look into each component of the pipeline which one is failing. The following method modifies the scikit-learn @@ -532,7 +413,8 @@

Intermediate stepsfor i, step in enumerate(steps): onnx_step = step['onnx_step'] - sess = rt.InferenceSession(onnx_step.SerializeToString()) + sess = rt.InferenceSession(onnx_step.SerializeToString(), + providers=["CPUExecutionProvider"]) onnx_outputs = sess.run(None, {'input': X_digits[:2].astype(np.float32)}) skl_outputs = step['model']._debug.outputs if 'transform' in skl_outputs: @@ -552,17 +434,16 @@

Intermediate stepsnumber=10000, globals=globals()))

-

Out:

benchmark <class 'sklearn.decomposition._pca.PCA'>
 scikit-learn
-0.6001067999999989
+0.6831115730001329
 onnxruntime
-0.1690043999999986
+0.16402971700017588
 benchmark <class 'sklearn.linear_model._logistic.LogisticRegression'>
 scikit-learn
-0.8385802000000027
+1.4586870539997108
 onnxruntime
-0.29466279999999756
+0.15432031699992876
 

Versions used for this example

@@ -573,19 +454,15 @@

Intermediate stepsprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 4.030 seconds)

- - + + + +
+
- - -
- - - +
- - -
-
- - - - - + + +
-
- +
+ + + + \ No newline at end of file diff --git a/auto_examples/plot_black_op.html b/auto_examples/plot_black_op.html index cdffcc420..44ab09e0b 100644 --- a/auto_examples/plot_black_op.html +++ b/auto_examples/plot_black_op.html @@ -1,417 +1,298 @@ - - - - - - - - Convert a model with a reduced list of operators — sklearn-onnx 1.11 documentation - - - - - - - - - - - - - - - - + + + + + + + + + Convert a model with a reduced list of operators - sklearn-onnx 1.14.0 documentation + + + - - - - - - - - - - - - - - - - - - + + - - - -
-
- - - -
-
- - - - - - -
- -
- - -
- - - - - +
+ + + +
+
+
+ + + + + Back to top + +
-
- -
- - +
+ -
-

Convert a model with a reduced list of operators#

Some runtimes dedicated to onnx do not implement all the operators, and a converted model may not run if one of them is missing from the list of available operators. Some converters may convert a model in different ways if the user wants to blacklist some operators.

- -
-

GaussianMixture#

The first converter to change its behaviour depending on a black list of operators is for model GaussianMixture.

import onnxruntime
@@ -434,31 +315,31 @@ 

GaussianMixturemodel.fit(X_train)

-

Out:

-
GaussianMixture()
-
-
+
+
GaussianMixture()
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
-
+
+

Default conversion#

model_onnx = to_onnx(
     model, X_train[:1].astype(np.float32),
     options={id(model): {'score_samples': True}},
     target_opset=12)
-sess = InferenceSession(model_onnx.SerializeToString())
+sess = InferenceSession(model_onnx.SerializeToString(),
+                        providers=["CPUExecutionProvider"])
 
 xt = X_test[:5].astype(np.float32)
 print(model.score_samples(xt))
 print(sess.run(None, {'X': xt})[2])
 
-

Out:

-
[-7.60436825 -1.81983593 -1.14948678 -3.46242545 -2.03540971]
-[[-7.6043735]
- [-1.8198361]
- [-1.1494861]
- [-3.4624252]
- [-2.03541  ]]
+
[-2.35262849 -1.77470989 -1.85001598 -3.50263433 -2.44689391]
+[[-2.3526287]
+ [-1.7747092]
+ [-1.8500156]
+ [-3.5026336]
+ [-2.4468932]]
 

Display the ONNX graph.

@@ -470,19 +351,18 @@

Default conversionos.system('dot -O -Gdpi=300 -Tpng mixture.dot') -image = plt.imread("mixture.dot.png") -fig, ax = plt.subplots(figsize=(40, 20)) -ax.imshow(image) -ax.axis('off') +image = plt.imread("mixture.dot.png") +fig, ax = plt.subplots(figsize=(40, 20)) +ax.imshow(image) +ax.axis('off')

-plot black op

Out:

-
(-0.5, 4522.5, 8425.5, -0.5)
+plot black op
(-0.5, 5287.5, 8425.5, -0.5)
 
-
-
+
+

Conversion without ReduceLogSumExp#

Parameter black_op is used to tell the converter not to use this operator. Let’s see what the converter produces in that case.

@@ -491,20 +371,20 @@

Conversion without ReduceLogSumExpoptions={id(model): {'score_samples': True}}, black_op={'ReduceLogSumExp'}, target_opset=12) -sess2 = InferenceSession(model_onnx2.SerializeToString()) +sess2 = InferenceSession(model_onnx2.SerializeToString(), + providers=["CPUExecutionProvider"]) xt = X_test[:5].astype(np.float32) print(model.score_samples(xt)) print(sess2.run(None, {'X': xt})[2])

-

Out:

-
[-7.60436825 -1.81983593 -1.14948678 -3.46242545 -2.03540971]
-[[-7.6043735]
- [-1.8198361]
- [-1.1494861]
- [-3.4624257]
- [-2.03541  ]]
+
[-2.35262849 -1.77470989 -1.85001598 -3.50263433 -2.44689391]
+[[-2.3526287]
+ [-1.7747092]
+ [-1.8500156]
+ [-3.5026336]
+ [-2.4468932]]
 

Display the ONNX graph.

@@ -516,19 +396,18 @@

Conversion without ReduceLogSumExpos.system('dot -O -Gdpi=300 -Tpng mixture2.dot') -image = plt.imread("mixture2.dot.png") -fig, ax = plt.subplots(figsize=(40, 20)) -ax.imshow(image) -ax.axis('off') +image = plt.imread("mixture2.dot.png") +fig, ax = plt.subplots(figsize=(40, 20)) +ax.imshow(image) +ax.axis('off')

-plot black op

Out:

-
(-0.5, 4305.5, 13264.5, -0.5)
+plot black op
(-0.5, 4921.5, 13264.5, -0.5)
 
-
-
-

Processing time

+ +
+

Processing time#

print(timeit(stmt="sess.run(None, {'X': xt})",
              number=10000, globals={'sess': sess, 'xt': xt}))
 
@@ -536,15 +415,14 @@ 

Processing timenumber=10000, globals={'sess2': sess2, 'xt': xt}))

-

Out:

-
0.30932419999999894
-0.38210510000000397
+
0.4812870999999177
+0.8075209660000837
 

The model using ReduceLogSumExp is much faster.

-
-
+
+

If the converter cannot convert without…#

Many converters do not consider the white and black lists of operators. If a converter fails to convert without using a blacklisted operator (or only whitelisted operators), @@ -559,7 +437,6 @@

If the converter cannot convert without print('Error:', e)

-

Out:

Error: Operator 'Add' is black listed.
 
@@ -573,19 +450,15 @@

If the converter cannot convert without print("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 20.711 seconds)

-

Gallery generated by Sphinx-Gallery

-
-
- + + + +
+
- - -
- - - + - - + + + + + \ No newline at end of file diff --git a/auto_examples/plot_cast_transformer.html b/auto_examples/plot_cast_transformer.html index 84b95f527..981e7ce99 100644 --- a/auto_examples/plot_cast_transformer.html +++ b/auto_examples/plot_cast_transformer.html @@ -1,386 +1,291 @@ - - - - - - - - Discrepencies with StandardScaler — sklearn-onnx 1.11 documentation - - - - - - - - - - - - - - - - + + + + + + + + + Discrepencies with StandardScaler - sklearn-onnx 1.14.0 documentation + + + - - - - - - - - - - - - - - - - - - + + - - - -
-
- - - -
-
- - - - -
- - -
- -
- On this page +
- -
- -
- -
- - -
- - - - - +
+ + + +
+
+
+ + + + + Back to top + +
-
- -
- - +
+ -
-

Discrepancies with StandardScaler#

A StandardScaler does a very basic scaling. The conversion in ONNX assumes that (x / y) is equivalent to x * ( 1 / y) but that’s not @@ -391,14 +296,8 @@ a decision tree. One small difference and the decision follows another path in the tree. Let’s see how to solve that issue.

- -
-

An example with fails#

This is not a typical example; it is built to make it fail, based on the assumption that (x / y) is usually different from x * (1 / y) on a computer.

@@ -445,8 +344,10 @@

An example with fails
onx1 = to_onnx(model1, X_train[:1].astype(np.float32))
-sess1 = InferenceSession(onx1.SerializeToString())
+
onx1 = to_onnx(model1, X_train[:1].astype(np.float32),
+               target_opset=15)
+sess1 = InferenceSession(onx1.SerializeToString(),
+                         providers=["CPUExecutionProvider"])
 

And the maximum difference.

@@ -462,7 +363,6 @@

An example with failsprint(md1)

-

Out:

322.39065126389346
 
@@ -475,19 +375,18 @@

An example with failsos.system('dot -O -Gdpi=300 -Tpng cast1.dot') -image = plt.imread("cast1.dot.png") -fig, ax = plt.subplots(figsize=(40, 20)) -ax.imshow(image) -ax.axis('off') +image = plt.imread("cast1.dot.png") +fig, ax = plt.subplots(figsize=(40, 20)) +ax.imshow(image) +ax.axis('off')

-plot cast transformer

Out:

-
(-0.5, 2007.5, 1707.5, -0.5)
+plot cast transformer
(-0.5, 2536.5, 1707.5, -0.5)
 
-
-
-

New pipeline#

Fixing the conversion requires to replace (x * (1 / y) by (x / y) and this division must happen in double. By default, the sklearn-onnx assumes every @@ -508,17 +407,18 @@

New pipelineexp2 = model2.predict(Xi_test) onx2 = to_onnx(model2, X_train[:1].astype(np.float32), - options={StandardScaler: {'div': 'div_cast'}}) + options={StandardScaler: {'div': 'div_cast'}}, + target_opset=15) -sess2 = InferenceSession(onx2.SerializeToString()) +sess2 = InferenceSession(onx2.SerializeToString(), + providers=["CPUExecutionProvider"]) got2 = sess2.run(None, {'X': Xi_test})[0] md2 = maxdiff(exp2, got2) print(md2)

-

Out:

-
2.9884569130445016e-05
+
2.9884569016758178e-05
 

The graph.

@@ -530,14 +430,13 @@

New pipelineos.system('dot -O -Gdpi=300 -Tpng cast2.dot') -image = plt.imread("cast2.dot.png") -fig, ax = plt.subplots(figsize=(40, 20)) -ax.imshow(image) -ax.axis('off') +image = plt.imread("cast2.dot.png") +fig, ax = plt.subplots(figsize=(40, 20)) +ax.imshow(image) +ax.axis('off')

-plot cast transformer

Out:

-
(-0.5, 2007.5, 4171.5, -0.5)
+plot cast transformer
(-0.5, 2536.5, 4171.5, -0.5)
 

Versions used for this example

@@ -550,19 +449,15 @@

New pipelineprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 3.542 seconds)

- -
- + + + +
+
- - -
- - - +
- - -
-
- - - - - + + +
-
- +
+ + + + \ No newline at end of file diff --git a/auto_examples/plot_complex_pipeline.html b/auto_examples/plot_complex_pipeline.html index 0b3ef796f..193637a3e 100644 --- a/auto_examples/plot_complex_pipeline.html +++ b/auto_examples/plot_complex_pipeline.html @@ -1,418 +1,299 @@ - - - - - - - - Convert a pipeline with ColumnTransformer — sklearn-onnx 1.11 documentation - - - - - - - - - - - - - - - - + + + + + + + + + Convert a pipeline with ColumnTransformer - sklearn-onnx 1.14.0 documentation + + + - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -
-
- - - - - - -
- -
- - -
- - - - - +
+ + + +
+
+
+ + + + + Back to top + +
-
- -
- - +
+ -
-

Convert a pipeline with ColumnTransformer#

scikit-learn recently shipped ColumnTransformer which lets the user define complex pipelines where each column may be preprocessed with a different transformer. sklearn-onnx still works in this case as shown in Section Convert complex pipelines.

- -
-

Create and train a complex pipeline#

We reuse the pipeline implemented in example Column Transformer with Mixed Types. There is one change because @@ -479,7 +360,6 @@

Create and train a complex pipelineclf.fit(X_train, y_train)

-

Out:

pclass         int64
 survived       int64
 name          object
@@ -495,32 +375,53 @@ 

Create and train a complex pipeline

-
-
-

Define the inputs of the ONNX graph

+
+
Pipeline(steps=[('preprocessor',
+                 ColumnTransformer(transformers=[('num',
+                                                  Pipeline(steps=[('imputer',
+                                                                   SimpleImputer(strategy='median')),
+                                                                  ('scaler',
+                                                                   StandardScaler())]),
+                                                  ['age', 'fare']),
+                                                 ('cat',
+                                                  Pipeline(steps=[('onehot',
+                                                                   OneHotEncoder(handle_unknown='ignore'))]),
+                                                  ['embarked', 'sex',
+                                                   'pclass'])])),
+                ('classifier', LogisticRegression())])
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
+
+
+
+
+

Define the inputs of the ONNX graph#

sklearn-onnx does not know the features used to train the model but it needs to know which feature has which name. We simply reuse the dataframe column definition.
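The helper that maps dataframe columns to ONNX tensor types is elided from this diff; a minimal sketch of such a mapping (the function name guess_schema is illustrative, not part of skl2onnx) could be:

from skl2onnx.common.data_types import (
    FloatTensorType, Int64TensorType, StringTensorType)

def guess_schema(df, drop=None):
    # One single-column tensor per dataframe column, typed from its dtype.
    inputs = []
    for name, dtype in zip(df.columns, df.dtypes):
        if drop is not None and name in drop:
            continue
        if dtype == 'int64':
            t = Int64TensorType([None, 1])
        elif dtype == 'float64':
            t = FloatTensorType([None, 1])
        else:
            t = StringTensorType([None, 1])
        inputs.append((name, t))
    return inputs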

-

Out:

pclass         int64
 name          object
 sex           object
@@ -558,7 +459,6 @@ 

Define the inputs of the ONNX graphpprint.pprint(initial_inputs)

-

Out:

[('pclass', Int64TensorType(shape=[None, 1])),
  ('name', StringTensorType(shape=[None, 1])),
  ('sex', StringTensorType(shape=[None, 1])),
@@ -577,9 +477,9 @@ 

Define the inputs of the ONNX graphMerging single column into vectors is not the most efficient way to compute the prediction. It could be done before converting the pipeline into a graph.

-

-
+
+

Convert the pipeline into ONNX#

try:
     model_onnx = convert_sklearn(clf, 'pipeline_titanic', initial_inputs,
                                  target_opset=12)
@@ -614,9 +514,9 @@ 

Convert the pipeline into ONNXf.write(model_onnx.SerializeToString())

-
-
-

Compare the predictions

+ +
+

Compare the predictions#

Final step, we need to ensure the converted model produces the same predictions, labels and probabilities. Let’s start with scikit-learn.

@@ -624,10 +524,9 @@

Compare the predictionsprint("predict_proba", clf.predict_proba(X_test[:2]))

-

Out:

-
predict [1 1 0 1 0]
-predict_proba [[0.44469197 0.55530803]
- [0.274616   0.725384  ]]
+
predict [0 0 0 1 1]
+predict_proba [[0.79113857 0.20886143]
+ [0.90867425 0.09132575]]
 

Predictions with onnxruntime. @@ -648,15 +547,15 @@

Compare the predictions
sess = rt.InferenceSession("pipeline_titanic.onnx")
+
sess = rt.InferenceSession("pipeline_titanic.onnx",
+                           providers=["CPUExecutionProvider"])
 pred_onx = sess.run(None, inputs)
 print("predict", pred_onx[0][:5])
 print("predict_proba", pred_onx[1][:2])
 
-

Out:

-
predict [1 1 0 1 0]
-predict_proba [{0: 0.4446920156478882, 1: 0.5553079843521118}, {0: 0.274616003036499, 1: 0.725383996963501}]
+
predict [0 0 0 1 1]
+predict_proba [{0: 0.7911385297775269, 1: 0.20886144042015076}, {0: 0.9086743593215942, 1: 0.09132567048072815}]
 

The output of onnxruntime is a list of dictionaries. @@ -668,25 +567,25 @@

Compare the predictionswith open("pipeline_titanic_nozipmap.onnx", "wb") as f: f.write(model_onnx.SerializeToString()) -sess = rt.InferenceSession("pipeline_titanic_nozipmap.onnx") +sess = rt.InferenceSession("pipeline_titanic_nozipmap.onnx", + providers=["CPUExecutionProvider"]) pred_onx = sess.run(None, inputs) print("predict", pred_onx[0][:5]) print("predict_proba", pred_onx[1][:2])

-

Out:

-
predict [1 1 0 1 0]
-predict_proba [[0.44469202 0.555308  ]
- [0.274616   0.725384  ]]
+
predict [0 0 0 1 1]
+predict_proba [[0.7911385  0.20886144]
+ [0.90867436 0.09132567]]
 

Let’s check they are the same.

assert_almost_equal(clf.predict_proba(X_test), pred_onx[1])
 
-
-
-

Display the ONNX graph

+ +
+

Display the ONNX graph#

Finally, let’s see the graph converted with sklearn-onnx.

pydot_graph = GetPydotGraph(model_onnx.graph, name=model_onnx.graph.name,
                             rankdir="TB",
@@ -698,14 +597,13 @@ 

Compare the predictionsos.system('dot -O -Gdpi=300 -Tpng pipeline_titanic.dot') -image = plt.imread("pipeline_titanic.dot.png") -fig, ax = plt.subplots(figsize=(40, 20)) -ax.imshow(image) -ax.axis('off') +image = plt.imread("pipeline_titanic.dot.png") +fig, ax = plt.subplots(figsize=(40, 20)) +ax.imshow(image) +ax.axis('off')

-plot complex pipeline

Out:

-
(-0.5, 5912.5, 6812.5, -0.5)
+plot complex pipeline
(-0.5, 6901.5, 6049.5, -0.5)
 

Versions used for this example

@@ -716,19 +614,15 @@

Compare the predictionsprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 7.062 seconds)

- - +
+ + +
+
- - -
- - - +

- - -
-
- - - - - + + +
-
- +
+ + + + \ No newline at end of file diff --git a/auto_examples/plot_convert_decision_function.html b/auto_examples/plot_convert_decision_function.html index 63d4f7906..8cf85d31a 100644 --- a/auto_examples/plot_convert_decision_function.html +++ b/auto_examples/plot_convert_decision_function.html @@ -1,391 +1,291 @@ - - - - - - - - Probabilities or raw scores — sklearn-onnx 1.11 documentation - - - - - - - - - - - - - - - - + + + + + + + + + Probabilities or raw scores - sklearn-onnx 1.14.0 documentation + + + - - - - - - - - - - - - - - - - - - + + - - - -
-
- - - -
-
- - - - - - -
- -
- - -
- - - - - +
+ +
+ +
+
+
+ + + + + Back to top + +
-
- -
- - +
+ -
-

Probabilities or raw scores#

A classifier usually returns a matrix of probabilities. By default, sklearn-onnx creates an ONNX graph which returns probabilities but it may skip that @@ -393,15 +293,8 @@ the method decision_function. Option 'raw_scores' is used to change the default behaviour. Let’s see that on a simple example.

- -
-

Train a model and convert it#

import numpy
 import sklearn
 from sklearn.datasets import load_iris
@@ -425,43 +318,42 @@ 

Train a model and convert ittarget_opset=12)

-

Out:

LogisticRegression(max_iter=500)
 
-
-
+
+

Output type#

Let’s confirm the output type of the probabilities is a list of dictionaries with onnxruntime.

-
sess = rt.InferenceSession(onx.SerializeToString())
+
sess = rt.InferenceSession(onx.SerializeToString(),
+                           providers=["CPUExecutionProvider"])
 res = sess.run(None, {'float_input': X_test.astype(numpy.float32)})
 print("skl", clr.predict_proba(X_test[:1]))
 print("onnx", res[1][:2])
 
-

Out:

-
skl [[0.00386787 0.7751643  0.22096783]]
-onnx [{0: 0.003867867635563016, 1: 0.7751643061637878, 2: 0.2209678441286087}, {0: 6.703807594021782e-05, 1: 0.12190072238445282, 2: 0.8780322670936584}]
+
skl [[9.94895805e-01 5.10418332e-03 1.21479166e-08]]
+onnx [{0: 0.9948958158493042, 1: 0.005104185082018375, 2: 1.2147937766826544e-08}, {0: 0.029782379046082497, 1: 0.9085215926170349, 2: 0.061696045100688934}]
 
-
-
+
+

Raw scores and decision_function#

initial_type = [('float_input', FloatTensorType([None, 4]))]
 options = {id(clr): {'raw_scores': True}}
 onx2 = convert_sklearn(clr, initial_types=initial_type, options=options,
                        target_opset=12)
 
-sess2 = rt.InferenceSession(onx2.SerializeToString())
+sess2 = rt.InferenceSession(onx2.SerializeToString(),
+                            providers=["CPUExecutionProvider"])
 res2 = sess2.run(None, {'float_input': X_test.astype(numpy.float32)})
 print("skl", clr.decision_function(X_test[:1]))
 print("onnx", res2[1][:2])
 
-

Out:

-
skl [[-3.11522826  2.18514306  0.9300852 ]]
-onnx [{0: -3.1152286529541016, 1: 2.185142993927002, 2: 0.9300851821899414}, {0: -5.661959648132324, 1: 1.843741774559021, 2: 3.818218231201172}]
+
skl [[  7.83118948   2.55861193 -10.38980141]]
+onnx [{0: 7.831189155578613, 1: 2.558612108230591, 2: -10.389801025390625}, {0: -1.3820686340332031, 1: 2.0358331203460693, 2: -0.6537656784057617}]
 

Versions used for this example

@@ -472,19 +364,15 @@

Raw scores and decision_functionprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 0.087 seconds)

- -
- +
+ +
+
+
- - -
- - - +
- - -
-
- - - - - + + +
-
- +
+ + + + \ No newline at end of file diff --git a/auto_examples/plot_convert_model.html b/auto_examples/plot_convert_model.html index 75d47ef42..853acc7d0 100644 --- a/auto_examples/plot_convert_model.html +++ b/auto_examples/plot_convert_model.html @@ -1,391 +1,291 @@ - - - - - - - - Train, convert and predict a model — sklearn-onnx 1.11 documentation - - - - - - - - - - - - - - - - + + + + + + + + + Train, convert and predict a model - sklearn-onnx 1.14.0 documentation + + + - - - - - - - - - - - - - - - - - - + + - - - -
-
- - - -
-
- - - - - - -
- -
- - -
- - - - - +
+ +
+ +
+
+
+ + + + + Back to top + +
-
- -
- - +
+ -
-

Train, convert and predict a model#

Training and deploying a model usually involves the following three steps:

    @@ -393,15 +293,8 @@
  • convert it into ONNX with sklearn-onnx,

  • predict with onnxruntime.

- -
-

Train a model#

A very basic example using random forest and the iris dataset.

import skl2onnx
@@ -423,13 +316,12 @@ 

Train a modelprint(clr)

-

Out:

RandomForestClassifier()
 
-
-
+
+

Convert a model into ONNX#

initial_type = [('float_input', FloatTensorType([None, 4]))]
 onx = convert_sklearn(clr, initial_types=initial_type,
                       target_opset=12)
@@ -438,10 +330,10 @@ 

Convert a model into ONNXf.write(onx.SerializeToString())

-
-
-

Compute the prediction with ONNX Runtime

-
sess = rt.InferenceSession("rf_iris.onnx")
+
+
+

Compute the prediction with ONNX Runtime#

+
sess = rt.InferenceSession("rf_iris.onnx", providers=["CPUExecutionProvider"])
 input_name = sess.get_inputs()[0].name
 label_name = sess.get_outputs()[0].name
 pred_onx = sess.run(
@@ -449,8 +341,7 @@ 

Compute the prediction with ONNX Runtime< print(pred_onx)

-

Out:

-
[2 1 2 1 1 1 1 2 2 1 0 1 0 1 1 2 0 2 0 0 0 2 0 1 0 0 0 2 1 2 2 2 1 1 2 2 0
+
[0 2 1 0 0 2 0 0 1 0 1 0 0 0 1 1 1 0 1 0 1 1 0 2 0 2 2 2 0 1 2 1 2 0 1 2 1
  2]
 
@@ -471,8 +362,15 @@

Compute the prediction with ONNX Runtime< print(pred_onx)

-

Out:

-
[2 1 2 1 1 1 1 2 2 1 0 1 0 1 1 2 0 2 0 0 0 2 0 1 0 0 0 2 1 2 2 2 1 1 2 2 0
+
/home/xadupre/github/scikit-learn/sklearn/linear_model/_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):
+STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
+
+Increase the number of iterations (max_iter) or scale the data as shown in:
+    https://scikit-learn.org/stable/modules/preprocessing.html
+Please also refer to the documentation for alternative solver options:
+    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
+  n_iter_i = _check_optimize_result(
+[0 2 1 0 0 2 0 0 1 0 1 0 0 0 1 1 1 0 1 0 1 1 0 2 0 2 2 2 0 1 2 1 2 0 1 2 1
  2]
 
@@ -484,19 +382,15 @@

Compute the prediction with ONNX Runtime< print("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 0.273 seconds)

-

Gallery generated by Sphinx-Gallery

-
-
- +
+ +
+
+
- - -
- - - +
- - -
-
- - - - - + + +
-
- +
+ + + + \ No newline at end of file diff --git a/auto_examples/plot_convert_syntax.html b/auto_examples/plot_convert_syntax.html index 034da7896..7939a6652 100644 --- a/auto_examples/plot_convert_syntax.html +++ b/auto_examples/plot_convert_syntax.html @@ -1,409 +1,295 @@ - - - - - - - - Different ways to convert a model — sklearn-onnx 1.11 documentation - - - - - - - - - - - - - - - - - + + + + + + + + + Different ways to convert a model - sklearn-onnx 1.14.0 documentation + + + - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -
-
- - - - - - -
- -
- - -
- - - - - +
+ +
+ +
+
+
+ + + + + Back to top + +
-
- -
- - +
+ -
-

Different ways to convert a model#

This example leverages some code added to implement custom converters in an easy way.

- -
-

Predict with onnxruntime#

Simple function to check the converted model works fine.

import onnxruntime
@@ -411,7 +297,7 @@ 

Predict with onnxruntimeimport numpy import numpy as np from sklearn.base import BaseEstimator, TransformerMixin -from sklearn.cluster import KMeans +from sklearn.cluster import KMeans from sklearn.pipeline import make_pipeline from onnxruntime import InferenceSession from skl2onnx import convert_sklearn, to_onnx, wrap_as_onnx_mixin @@ -427,12 +313,12 @@

Predict with onnxruntimereturn res[0]

-
-
+
+

Simple KMeans#

The first way: convert_sklearn().

X = np.arange(20).reshape(10, 2)
-tr = KMeans(n_clusters=2)
+tr = KMeans(n_clusters=2)
 tr.fit(X)
 
 onx = convert_sklearn(
@@ -441,29 +327,27 @@ 

Simple KMeansprint(predict_with_onnxruntime(onx, X))

-

Out:

-
[0 0 0 0 0 1 1 1 1 1]
+
[1 1 1 1 1 0 0 0 0 0]
 

The second way: to_onnx(): no need to play with FloatTensorType anymore.

X = np.arange(20).reshape(10, 2)
-tr = KMeans(n_clusters=2)
+tr = KMeans(n_clusters=2)
 tr.fit(X)
 
 onx = to_onnx(tr, X.astype(np.float32), target_opset=12)
 print(predict_with_onnxruntime(onx, X))
 
-

Out:

-
[0 0 0 0 0 1 1 1 1 1]
+
[1 1 1 1 1 0 0 0 0 0]
 

The third way: wrap_as_onnx_mixin(): wraps the machine learned model into a new class inheriting from OnnxOperatorMixin.

X = np.arange(20).reshape(10, 2)
-tr = KMeans(n_clusters=2)
+tr = KMeans(n_clusters=2)
 tr.fit(X)
 
 tr_mixin = wrap_as_onnx_mixin(tr, target_opset=12)
@@ -472,14 +356,13 @@ 

Simple KMeansprint(predict_with_onnxruntime(onx, X))

-

Out:

[0 0 0 0 0 1 1 1 1 1]
 

The fourth way: wrap_as_onnx_mixin(): can be called before fitting the model.

X = np.arange(20).reshape(10, 2)
-tr = wrap_as_onnx_mixin(KMeans(n_clusters=2),
+tr = wrap_as_onnx_mixin(KMeans(n_clusters=2),
                         target_opset=12)
 tr.fit(X)
 
@@ -487,13 +370,12 @@ 

Simple KMeansprint(predict_with_onnxruntime(onx, X))

-

Out:

-
[0 0 0 0 0 1 1 1 1 1]
+
[1 1 1 1 1 0 0 0 0 0]
 
-
-
+
+

Pipeline and a custom object#

This is a simple scaler.

class CustomOpTransformer(BaseEstimator, TransformerMixin,
                           OnnxOperatorMixin):
@@ -532,7 +414,7 @@ 

Pipeline and a custom object

Way 1

X = np.arange(20).reshape(10, 2)
-tr = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))
+tr = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))
 tr.fit(X)
 
 onx = convert_sklearn(
@@ -541,26 +423,24 @@ 

Pipeline and a custom objectprint(predict_with_onnxruntime(onx, X))

-

Out:

[1 1 1 1 1 0 0 0 0 0]
 

Way 2

X = np.arange(20).reshape(10, 2)
-tr = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))
+tr = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))
 tr.fit(X)
 
 onx = to_onnx(tr, X.astype(np.float32), target_opset=12)
 print(predict_with_onnxruntime(onx, X))
 
-

Out:

[1 1 1 1 1 0 0 0 0 0]
 

Way 3

X = np.arange(20).reshape(10, 2)
-tr = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))
+tr = make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2))
 tr.fit(X)
 
 tr_mixin = wrap_as_onnx_mixin(tr, target_opset=12)
@@ -569,14 +449,13 @@ 

Pipeline and a custom objectprint(predict_with_onnxruntime(onx, X))

-

Out:

[1 1 1 1 1 0 0 0 0 0]
 

Way 4

X = np.arange(20).reshape(10, 2)
 tr = wrap_as_onnx_mixin(
-    make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2)),
+    make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2)),
     target_opset=12)
 
 tr.fit(X)
@@ -585,13 +464,12 @@ 

Pipeline and a custom objectprint(predict_with_onnxruntime(onx, X))

-

Out:

-
[0 0 0 0 0 1 1 1 1 1]
+
[1 1 1 1 1 0 0 0 0 0]
 
-
-

+
+

Display the ONNX graph#

Finally, let’s see the graph converted with sklearn-onnx.

from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer  # noqa
 pydot_graph = GetPydotGraph(onx.graph, name=onx.graph.name, rankdir="TB",
@@ -604,14 +482,13 @@ 

Display the ONNX graphos.system('dot -O -Gdpi=300 -Tpng pipeline_onnx_mixin.dot') import matplotlib.pyplot as plt # noqa -image = plt.imread("pipeline_onnx_mixin.dot.png") -fig, ax = plt.subplots(figsize=(40, 20)) -ax.imshow(image) -ax.axis('off') +image = plt.imread("pipeline_onnx_mixin.dot.png") +fig, ax = plt.subplots(figsize=(40, 20)) +ax.imshow(image) +ax.axis('off')

-plot convert syntax

Out:

-
(-0.5, 2599.5, 6900.5, -0.5)
+plot convert syntax
(-0.5, 3103.5, 6900.5, -0.5)
 

Versions used for this example

@@ -624,19 +501,15 @@

Display the ONNX graphprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 4.354 seconds)

- - +
+ +
+
+
- - -
- - - +
- - -
-
- - - - - + + +
-
- +
+ + + + \ No newline at end of file diff --git a/auto_examples/plot_convert_zipmap.html b/auto_examples/plot_convert_zipmap.html index 8f8e91e3f..be3df8450 100644 --- a/auto_examples/plot_convert_zipmap.html +++ b/auto_examples/plot_convert_zipmap.html @@ -1,402 +1,291 @@ - - - - - - - - Probabilities as a vector or as a ZipMap — sklearn-onnx 1.11 documentation - - - - - - - - - - - - - - - - - + + + + + + + + + Probabilities as a vector or as a ZipMap - sklearn-onnx 1.14.0 documentation + + + - - - - - - - - - - - - - - - - - - + + - - - -
-
- - - -
-
- - - - - - -
- -
- - -
- - - - - +
+ + + +
+
+
+ + + + + Back to top + +
-
- -
- - +
+ -
-

Probabilities as a vector or as a ZipMap#

A classifier usually returns a matrix of probabilities. By default, sklearn-onnx converts that matrix into a list of dictionaries where each probability is mapped @@ -404,17 +293,8 @@ This conversion increases the prediction time and is not always needed. Let’s see how to deactivate this behaviour on the Iris example.

- -
-

Train a model and convert it#

from timeit import repeat
 import numpy
 import sklearn
@@ -439,13 +319,12 @@ 

Train a model and convert ittarget_opset=12)

-

Out:

LogisticRegression(max_iter=500)
 
-
-
+
+

Output type#

Let’s confirm the output type of the probabilities is a list of dictionaries with onnxruntime.

sess = rt.InferenceSession(onx.SerializeToString())
@@ -455,15 +334,14 @@ 

Output typeprint("type for the first observations:", type(res[1][0]))

-

Out:

-
[{0: 0.021887362003326416, 1: 0.9477903246879578, 2: 0.03032231144607067}, {0: 0.9532396197319031, 1: 0.04676028713583946, 2: 1.4053124175461562e-07}]
+
[{0: 0.9724591970443726, 1: 0.027540581300854683, 2: 2.352435330976732e-07}, {0: 0.9776268601417542, 1: 0.022372985258698463, 2: 1.9038986920349998e-07}]
 probabilities type: <class 'list'>
 type for the first observations: <class 'dict'>
 
-
-
+
+

Without ZipMap#

Let’s remove the ZipMap operator.

initial_type = [('float_input', FloatTensorType([None, 4]))]
 options = {id(clr): {'zipmap': False}}
@@ -477,16 +355,15 @@ 

Without ZipMapprint("type for the first observations:", type(res2[1][0]))

-

Out:

-
[[2.1887362e-02 9.4779032e-01 3.0322311e-02]
- [9.5323962e-01 4.6760287e-02 1.4053124e-07]]
+
[[9.7245920e-01 2.7540581e-02 2.3524353e-07]
+ [9.7762686e-01 2.2372985e-02 1.9038987e-07]]
 probabilities type: <class 'numpy.ndarray'>
 type for the first observations: <class 'numpy.ndarray'>
 
-
-
+
+

One output per class#

This options removes the final operator ZipMap and splits the probabilities into columns. The final model produces one output for the label, and one output per class.

@@ -501,16 +378,15 @@

One output per classout.name, res3[i].shape, res3[i][:2]))

-

Out:

-
output: 'output_label' shape=(38,) values=[1 0]...
-output: 'i0' shape=(38,) values=[0.02188736 0.9532396 ]...
-output: 'i1' shape=(38,) values=[0.9477903  0.04676029]...
-output: 'i2' shape=(38,) values=[3.0322311e-02 1.4053124e-07]...
+
output: 'output_label' shape=(38,) values=[0 0]...
+output: 'i0' shape=(38,) values=[0.9724592  0.97762686]...
+output: 'i1' shape=(38,) values=[0.02754058 0.02237299]...
+output: 'i2' shape=(38,) values=[2.3524353e-07 1.9038987e-07]...
 
-
-
-

Let’s compare prediction time#
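The timing code itself is elided from this diff; a sketch of how such a comparison could be written (session and variable names assumed from the sections above) follows.

from timeit import repeat

X32 = X_test.astype(numpy.float32)
# sess: model with ZipMap, sess2: zipmap=False, sess3: one output per class.
print("Time with ZipMap:")
print(repeat(lambda: sess.run(None, {'float_input': X32}), number=100, repeat=10))
print("Time without ZipMap:")
print(repeat(lambda: sess2.run(None, {'float_input': X32}), number=100, repeat=10))
print("Time without ZipMap but with columns:")
print(repeat(lambda: sess3.run(None, {'float_input': X32}), number=100, repeat=10))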

-

Out:

Time with ZipMap:
-[0.004017300000001001, 0.009114900000000148, 0.009305599999997582, 0.0054103999999952634, 0.004726100000006284, 0.006027799999998251, 0.009800300000001982, 0.007048799999999744, 0.007752400000001103, 0.008808100000003094]
+[0.012372604999882242, 0.004819102000055864, 0.002799700999958077, 0.005126302000007854, 0.004604101999802879, 0.0027419010000357957, 0.0030426010000610404, 0.0026854010000079143, 0.0026163009999891074, 0.0026090009998824826]
 Time without ZipMap:
-[0.002234899999997708, 0.0016254999999958386, 0.0030847000000022717, 0.003498000000000445, 0.002439699999996492, 0.0033817999999996573, 0.003481100000001902, 0.0036743000000001302, 0.0021257000000005633, 0.0017648000000036745]
+[0.0016827009999360598, 0.0011974000001373497, 0.0012046009999266971, 0.0011644000001069799, 0.0011607010001171147, 0.0012080000001333246, 0.001181700999950408, 0.0011657999998533342, 0.0013147009999556758, 0.0015800999999555643]
 Time without ZipMap but with columns:
-[0.003436299999997061, 0.0025292999999990684, 0.002416300000000149, 0.0025344999999958873, 0.002429499999998086, 0.0029727000000008275, 0.0028673999999995203, 0.0025380000000012615, 0.002769100000001856, 0.0037428999999988832]
+[0.00204040099993108, 0.0020714009999664995, 0.0020063009999375936, 0.001988400999834994, 0.0019787999999607564, 0.0019756009999127855, 0.0021800010001697956, 0.002363000999821452, 0.0021929009999439586, 0.002378501000066535]
 

Versions used for this example

@@ -550,19 +425,15 @@

Let’s compare prediction timeprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 0.264 seconds)

- -
- + + + +
+
- - -
- - - +
- - -
-
- - - - - + + +
-
- +
+ + + + \ No newline at end of file diff --git a/auto_examples/plot_custom_model.html b/auto_examples/plot_custom_model.html index 149f622d8..799cca2a7 100644 --- a/auto_examples/plot_custom_model.html +++ b/auto_examples/plot_custom_model.html @@ -1,406 +1,291 @@ - - - - - - - - Write your own converter for your own model — sklearn-onnx 1.11 documentation - - - - - - - - - - - - - - - - + + + + + + + + + Write your own converter for your own model - sklearn-onnx 1.14.0 documentation + + + - - - - - - - - - - - - - - - - - - + + - - - -
-
- - - -
-
- - - - - - -
- -
- - -
- - - - - +
+ + + +
+
+
+ + + + + Back to top + +
-
- -
- - +
+ -
-

Write your own converter for your own model#

It might happen that you implemented your own model and there is obviously no existing converter for this new model. That does not mean the conversion of a pipeline @@ -413,31 +298,21 @@ fit_transform. This example proposes a way to train a machine learned model which approximates the outputs of a t-SNE transformer.

- -
-

Implementation of the new transform#

The first section is about the implementation. The code is quite generic but basically follows this process to fit the model with X and y:

    -
  • t-SNE, (X, y) \rightarrow X_2 \in \mathbb{R}^2

  • -
  • k nearest neightbours, fit(X, X_2), -which produces function f(X) \rightarrow X_3

  • -
  • final normalization, simple scaling X_3 \rightarrow X_4

  • +
  • t-SNE, (X, y) \rightarrow X_2 \in \mathbb{R}^2

  • +
  • k nearest neightbours, fit(X, X_2), +which produces function f(X) \rightarrow X_3

  • +
  • final normalization, simple scaling X_3 \rightarrow X_4

And to predict on a test set:

    -
  • k nearest neightbours, f(X') \rightarrow X'_3

  • -
  • final normalization, simple scaling X'_3 \rightarrow X'_4

  • +
  • k nearest neightbours, f(X') \rightarrow X'_3

  • +
  • final normalization, simple scaling X'_3 \rightarrow X'_4

-
-
+
+

Experimentation on MNIST#

Let’s fit t-SNE…

digits = datasets.load_digits(n_class=6)
 Xd = digits.data
@@ -623,13 +498,13 @@ 

Experimentation on MNISTx_min, x_max = numpy.min(Xp, 0), numpy.max(Xp, 0) X = (Xp - x_min) / (x_max - x_min) - fig, ax = plt.subplots(1, 2, figsize=figsize) + fig, ax = plt.subplots(1, 2, figsize=figsize) for i in range(X.shape[0]): ax[0].text(X[i, 0], X[i, 1], str(y[i]), - color=plt.cm.Set1(y[i] / 10.), + color=plt.cm.Set1(y[i] / 10.), fontdict={'weight': 'bold', 'size': 9}) - if hasattr(offsetbox, 'AnnotationBbox'): + if hasattr(offsetbox, 'AnnotationBbox'): # only print thumbnails with matplotlib > 1.0 shown_images = numpy.array([[1., 1.]]) # just something big for i in range(X.shape[0]): @@ -638,8 +513,8 @@

Experimentation on MNIST# don't show points that are too close continue shown_images = numpy.r_[shown_images, [X[i]]] - imagebox = offsetbox.AnnotationBbox( - offsetbox.OffsetImage(imgs[i], cmap=plt.cm.gray_r), + imagebox = offsetbox.AnnotationBbox( + offsetbox.OffsetImage(imgs[i], cmap=plt.cm.gray_r), X[i]) ax[0].add_artist(imagebox) ax[0].set_xticks([]), ax[0].set_yticks([]) @@ -654,14 +529,13 @@

Experimentation on MNIST"t-SNE embedding of the digits")

-t-SNE embedding of the digits

Out:

-
+
+

Repeatable t-SNE#

Just to check it is working.

ptsne_knn = PredictableTSNE()
 ptsne_knn.fit(X_train, y_train)
@@ -672,9 +546,8 @@ 

Repeatable t-SNE"StandardScaler+KNeighborsRegressor")

-Predictable t-SNE of the digits StandardScaler+KNeighborsRegressor

Out:

-
-Predictable t-SNE of the digits StandardScaler+KNeighborsRegressor

Out:

-
+
+

ONNX - shape_calculator, converter#

Now starts the part dedicated to ONNX. ONNX conversion requires two functions, one to calculate the shape of the outputs based @@ -761,9 +633,9 @@

ONNX - shape_calculator, converterpredictable_tsne_converter)
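Most of the shape calculator and converter bodies are elided in this diff; an illustrative sketch of the registration pattern (update_registered_converter is the real skl2onnx API, PredictableTSNE is the class defined above; the alias string and the function bodies here are simplified placeholders):

from skl2onnx import update_registered_converter
from skl2onnx.common.data_types import FloatTensorType

def predictable_tsne_shape_calculator(operator):
    # One output row per input row, two columns (the t-SNE plane).
    N = operator.inputs[0].type.shape[0]
    operator.outputs[0].type = FloatTensorType([N, 2])

def predictable_tsne_converter(scope, operator, container):
    # The real converter builds the nodes for the scaler and the k-NN
    # regressor fitted inside PredictableTSNE; omitted in this sketch.
    ...

update_registered_converter(
    PredictableTSNE, 'CustomPredictableTSNE',
    predictable_tsne_shape_calculator, predictable_tsne_converter)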

-
-
-

Conversion to ONNX

+ +
+

Conversion to ONNX#

We just need to call convert_sklearn as any other model to convert.

model_onnx = convert_sklearn(
@@ -780,10 +652,9 @@ 

Conversion to ONNX

-

Out:

ptsne_knn.tranform
- [[1.0347298 1.2012889]
- [1.2406418 1.1664357]]
+ [[ 1.1081381   0.09305604]
+ [-0.05512788 -0.40800577]]
 

Predictions with onnxruntime.

@@ -793,8 +664,7 @@

Conversion to ONNXprint("transform", pred_onx[0])

-

Out:

-
transform [[1.0347298 1.2012889]]
+
transform [[1.1081381  0.09305604]]
 

The converter for the nearest neighbours produces an ONNX graph @@ -804,13 +674,12 @@

Conversion to ONNXprint("transform", pred_onx[0])

-

Out:

-
transform [[1.2406416 1.1664357]]
+
transform [[-0.05512788 -0.40800577]]
 
-
-
-

Display the ONNX graph

+ +
+

Display the ONNX graph#

pydot_graph = GetPydotGraph(
     model_onnx.graph, name=model_onnx.graph.name, rankdir="TB",
     node_producer=GetOpNodeProducer(
@@ -819,14 +688,13 @@ 

Display the ONNX graphos.system('dot -O -Gdpi=300 -Tpng pipeline_tsne.dot') -image = plt.imread("pipeline_tsne.dot.png") -fig, ax = plt.subplots(figsize=(40, 20)) -ax.imshow(image) -ax.axis('off') +image = plt.imread("pipeline_tsne.dot.png") +fig, ax = plt.subplots(figsize=(40, 20)) +ax.imshow(image) +ax.axis('off')

-plot custom model

Out:

-
(-0.5, 2071.5, 9099.5, -0.5)
+plot custom model
(-0.5, 2643.5, 9099.5, -0.5)
 

Versions used for this example

@@ -837,19 +705,15 @@

Display the ONNX graphprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 13.684 seconds)

- - +
+ + +
+
- - -
- - - +
- - -
-
- - - - - + + +
-
- +
+ + + + \ No newline at end of file diff --git a/auto_examples/plot_custom_parser.html b/auto_examples/plot_custom_parser.html index bdcf31424..0d738e4f2 100644 --- a/auto_examples/plot_custom_parser.html +++ b/auto_examples/plot_custom_parser.html @@ -1,425 +1,299 @@ - - - - - - - - When a custom model is neither a classifier nor a regressor — sklearn-onnx 1.11 documentation - - - - - - - - - - - - - - - - - + + + + + + + + + When a custom model is neither a classifier nor a regressor - sklearn-onnx 1.14.0 documentation + + + - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -
-
- - - - - - -
- -
- - -
- - - - - +
+ + + +
+
+
+ + + + + Back to top + +
-
- -
- - +
+ -
-

When a custom model is neither a classifier nor a regressor#

scikit-learn’s API specifies that a regressor produces one output and a classifier produces two outputs, predicted labels and probabilities. The goal here is to add a third result which tells if the probability is above a given threshold. That’s implemented in method validate.

- -
-

Iris and scoring#

A new class is created; it trains any classifier and implements the method validate mentioned above.
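The full class is elided from this diff; a compact, illustrative sketch of such a wrapper (simplified, the real ValidatorClassifier in the example differs in details) might be:

import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.linear_model import LogisticRegression

class ValidatorClassifier(BaseEstimator, ClassifierMixin):
    # Wraps any classifier and adds validate(): 1 when the highest
    # predicted probability exceeds a threshold, 0 otherwise.
    def __init__(self, estimator=None, threshold=0.75):
        self.estimator = estimator
        self.threshold = threshold

    def fit(self, X, y):
        est = (self.estimator if self.estimator is not None
               else LogisticRegression(solver='liblinear'))
        self.estimator_ = clone(est).fit(X, y)
        return self

    def predict(self, X):
        return self.estimator_.predict(X)

    def predict_proba(self, X):
        return self.estimator_.predict_proba(X)

    def validate(self, X):
        proba = self.predict_proba(X)
        return (proba.max(axis=1) >= self.threshold).astype(np.int64)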

import inspect
@@ -484,24 +358,23 @@ 

Iris and scoringmodel.fit(X_train, y_train)

-

Out:

-
ValidatorClassifier(estimator=LogisticRegression(solver='liblinear'))
-
+
+
ValidatorClassifier(estimator=LogisticRegression(solver='liblinear'))
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
-

Let’s now measure the indicator which tells +
+

Let’s now measure the indicator which tells if the probability of a prediction is above a threshold.

print(model.validate(X_test))
 
-

Out:

-
[1 0 1 0 0 0 1 0 1 0 0 0 0 0 1 0 0 0 0 1 0 1 0 0 0 1 1 1 1 0 1 1 0 1 1 0 0
- 0]
+
[1 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 1 1 1 1 1 1 0 1 1 1 1 0 1 0 0 1 1
+ 1]
 
-
-
+
+

Conversion to ONNX#

The conversion fails for a new model because the library does not know any converter associated to this new model.

@@ -512,7 +385,6 @@

Conversion to ONNXprint(e)

-

Out:

Unable to find a shape calculator for type '<class '__main__.ValidatorClassifier'>'.
 It usually means the pipeline being converted contains a
 transformer or a predictor with no corresponding converter
@@ -527,9 +399,9 @@ 

Conversion to ONNX -

Custom converter

+ +
+

Custom converter#

We reuse some pieces of code from Write your own converter for your own model. The shape calculator defines the shape of every output of the converted model.
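As a reminder, a shape calculator for this model might look like the following minimal sketch; the three outputs (label, probabilities, validation) follow the description above, while the function name and exact shapes are assumptions.

# Minimal sketch (assumption): declare the three outputs of ValidatorClassifier.
from skl2onnx.common.data_types import Int64TensorType, FloatTensorType


def validator_classifier_shape_calculator(operator):
    inp = operator.inputs[0]               # input features
    op = operator.raw_operator             # the ValidatorClassifier instance
    n = inp.type.shape[0]                  # usually None (dynamic batch size)
    nclasses = len(op.estimator_.classes_)
    operator.outputs[0].type = Int64TensorType([n])            # label
    operator.outputs[1].type = FloatTensorType([n, nclasses])  # probabilities
    operator.outputs[2].type = Int64TensorType([n])            # validation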

@@ -573,9 +445,19 @@

Custom converter# We now handle the validation. val_max = scope.get_unique_variable_name('val_max') - container.add_node('ReduceMax', val_prob.full_name, val_max, - name=scope.get_unique_operator_name('ReduceMax'), - axes=[1], keepdims=0) + if container.target_opset >= 18: + axis_name = scope.get_unique_variable_name('axis') + container.add_initializer( + axis_name, onnx_proto.TensorProto.INT64, [1], [1]) + container.add_node( + 'ReduceMax', [val_prob.full_name, axis_name], val_max, + name=scope.get_unique_operator_name('ReduceMax'), + keepdims=0) + else: + container.add_node( + 'ReduceMax', val_prob.full_name, val_max, + name=scope.get_unique_operator_name('ReduceMax'), + axes=[1], keepdims=0) th_name = scope.get_unique_variable_name('threshold') container.add_initializer( @@ -607,7 +489,6 @@
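The hunk above reflects a change in ONNX opset 18: ReduceMax now takes its axes as a second input instead of an attribute. A small standalone sketch with onnx.helper shows both forms (tensor names are illustrative):

# Sketch (assumption): the same ReduceMax node before and after opset 18,
# where axes moved from an attribute to a second input.
import numpy as np
from onnx import helper, numpy_helper

# opset < 18: axes is an attribute
node_old = helper.make_node('ReduceMax', ['val_prob'], ['val_max'],
                            axes=[1], keepdims=0)

# opset >= 18: axes is an initializer passed as a second input
axes_init = numpy_helper.from_array(np.array([1], dtype=np.int64), name='axes')
node_new = helper.make_node('ReduceMax', ['val_prob', 'axes'], ['val_max'],
                            keepdims=0)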

Custom converterprint(e)

-

Out:

3 outputs expected not 2.
 
@@ -615,9 +496,9 @@

Custom converter -

-
-

Custom parser

+ +
+

Custom parser#

def validator_classifier_parser(scope, model, inputs, custom_parsers=None):
     alias = get_model_alias(type(model))
     this_operator = scope.declare_local_operator(alias, model)
@@ -649,9 +530,9 @@ 

Custom parsertarget_opset=12)

-
-
-

Final test

+ +
+

Final test#

We now need to check that the results are the same with ONNX.

X32 = X_test[:5].astype(np.float32)
 
@@ -669,30 +550,29 @@ 

Final testprint("onnx", results[2])

-

Out:

--labels--
-sklearn [0 2 1 2 1]
-onnx [0 2 1 2 1]
+sklearn [0 0 0 2 2]
+onnx [0 0 0 2 2]
 --probabilities--
-sklearn [[8.12045563e-01 1.87863055e-01 9.13814930e-05]
- [4.37049891e-04 3.50223916e-01 6.49339034e-01]
- [3.64923318e-02 8.11824505e-01 1.51683163e-01]
- [2.89201758e-03 4.19489309e-01 5.77618674e-01]
- [5.59949801e-02 6.80666124e-01 2.63338895e-01]]
-onnx [[8.1204557e-01 1.8786308e-01 9.1369213e-05]
- [4.3703648e-04 3.5022387e-01 6.4933908e-01]
- [3.6492348e-02 8.1182444e-01 1.5168323e-01]
- [2.8920460e-03 4.1948932e-01 5.7761866e-01]
- [5.5994958e-02 6.8066609e-01 2.6333898e-01]]
+sklearn [[8.15388734e-01 1.84445827e-01 1.65438648e-04]
+ [8.62653409e-01 1.37139236e-01 2.07354284e-04]
+ [9.04537964e-01 9.53809626e-02 8.10733209e-05]
+ [4.98573169e-02 3.25151760e-01 6.24990923e-01]
+ [3.20811905e-03 3.87065374e-01 6.09726507e-01]]
+onnx [[8.1538868e-01 1.8444584e-01 1.6544168e-04]
+ [8.6265337e-01 1.3713925e-01 2.0727392e-04]
+ [9.0453798e-01 9.5380947e-02 8.1110506e-05]
+ [4.9857341e-02 3.2515183e-01 6.2499082e-01]
+ [3.2081197e-03 3.8706535e-01 6.0972649e-01]]
 --validation--
-sklearn [1 0 1 0 0]
-onnx [1 0 1 0 0]
+sklearn [1 1 1 0 0]
+onnx [1 1 1 0 0]
 

It looks good.

-
-
-

Display the ONNX graph

+ +
+

Display the ONNX graph#

pydot_graph = GetPydotGraph(
     model_onnx.graph, name=model_onnx.graph.name, rankdir="TB",
     node_producer=GetOpNodeProducer(
@@ -701,14 +581,13 @@ 

Display the ONNX graphos.system('dot -O -Gdpi=300 -Tpng validator_classifier.dot') -image = plt.imread("validator_classifier.dot.png") -fig, ax = plt.subplots(figsize=(40, 20)) -ax.imshow(image) -ax.axis('off') +image = plt.imread("validator_classifier.dot.png") +fig, ax = plt.subplots(figsize=(40, 20)) +ax.imshow(image) +ax.axis('off')

-plot custom parser

Out:

-
(-0.5, 2643.5, 4934.5, -0.5)
+plot custom parser
(-0.5, 3160.5, 4934.5, -0.5)
 

Versions used for this example

@@ -719,19 +598,15 @@

Display the ONNX graphprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 2.490 seconds)

\ No newline at end of file
diff --git a/auto_examples/plot_custom_parser_alternative.html b/auto_examples/plot_custom_parser_alternative.html
index fd2b9cbbe..a52c1cc62 100644
--- a/auto_examples/plot_custom_parser_alternative.html
+++ b/auto_examples/plot_custom_parser_alternative.html
@@ -1,406 +1,291 @@
-When a custom model is neither a classifier nor a regressor (alternative) — sklearn-onnx 1.11 documentation
+When a custom model is neither a classifier nor a regressor (alternative) - sklearn-onnx 1.14.0 documentation
+ -
-

When a custom model is neither a classifier nor a regressor (alternative)

+
+

When a custom model is neither a classifier nor a regressor (alternative)#

Note

This example rewrites When a custom model is neither a classifier nor a regressor by using @@ -413,18 +298,8 @@ to add a third result which tells if the probability is above a given threshold. That’s implemented in method validate.

- -
-

Iris and scoring

+
+

Iris and scoring#

A new class is created, it trains any classifier and implements the method validate mentioned above.

-

Out:

-
ValidatorClassifier(estimator=LogisticRegression(solver='liblinear'))
-
+
+
ValidatorClassifier(estimator=LogisticRegression(solver='liblinear'))
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
-

Let's now measure the indicator which tells if the probability of a prediction is above a threshold.

print(model.validate(X_test))
 
-

Out:

-
[0 0 0 0 0 1 1 1 1 0 1 0 0 0 1 1 0 0 0 1 1 1 0 1 0 1 0 0 0 0 1 0 0 0 0 0 1
- 0]
+
[1 1 1 0 1 0 0 0 1 1 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 0 0 0 0 0 1 0 1 0 1 0
+ 1]
 
-
-
+
+

Conversion to ONNX#

The conversion fails for a new model because the library does not know any converter associated with this new model.

@@ -517,7 +391,6 @@

Conversion to ONNXprint(e)

-

Out:

+
+

Custom converter#

We reuse some pieces of code from Write your own converter for your own model. The shape calculator defines the shape of every output of the converted model.

@@ -567,7 +440,7 @@

Custom converteronnx_op = OnnxSubEstimator(model, input0, op_version=opv, options={'zipmap': False}) - rmax = OnnxReduceMax(onnx_op[1], axes=[1], keepdims=0, op_version=opv) + rmax = OnnxReduceMaxApi18(onnx_op[1], axes=[1], keepdims=0, op_version=opv) great = OnnxGreater(rmax, np.array([op.threshold], dtype=np.float32), op_version=opv) valid = OnnxCast(great, to=onnx_proto.TensorProto.INT64, @@ -599,7 +472,6 @@

Custom converterprint(e)

-

Out:

3 outputs expected not 2.
 
@@ -607,9 +479,9 @@

Custom converter -

-
-

Custom parser

+ +
+

Custom parser#

def validator_classifier_parser(scope, model, inputs, custom_parsers=None):
     alias = get_model_alias(type(model))
     this_operator = scope.declare_local_operator(alias, model)
@@ -641,9 +513,9 @@ 

Custom parsertarget_opset=12)

-
-
-

Final test

+ +
+

Final test#

We now need to check that the results are the same with ONNX.

X32 = X_test[:5].astype(np.float32)
 
@@ -661,30 +533,29 @@ 

Final testprint("onnx", results[2])

-

Out:

--labels--
-sklearn [1 2 2 1 2]
-onnx [1 2 2 1 2]
+sklearn [2 0 0 2 0]
+onnx [2 0 0 2 0]
 --probabilities--
-sklearn [[0.00112034 0.52456535 0.47431431]
- [0.00132435 0.28674876 0.71192689]
- [0.0007723  0.33584156 0.66338614]
- [0.04540803 0.63252491 0.32206707]
- [0.00477428 0.37438644 0.62083927]]
-onnx [[0.00112029 0.5245654  0.47431436]
- [0.00132436 0.2867488  0.7119269 ]
- [0.00077231 0.3358416  0.6633861 ]
- [0.04540805 0.6325249  0.32206705]
- [0.00477428 0.37438646 0.62083924]]
+sklearn [[6.57906777e-04 1.91959240e-01 8.07382854e-01]
+ [8.72054450e-01 1.27893759e-01 5.17916588e-05]
+ [8.53658204e-01 1.46046803e-01 2.94993135e-04]
+ [2.73275979e-04 3.98725606e-01 6.01001118e-01]
+ [9.70430915e-01 2.95535348e-02 1.55504186e-05]]
+onnx [[6.5787166e-04 1.9195931e-01 8.0738282e-01]
+ [8.7205452e-01 1.2789373e-01 5.1797400e-05]
+ [8.5365826e-01 1.4604677e-01 2.9501395e-04]
+ [2.7327205e-04 3.9872569e-01 6.0100102e-01]
+ [9.7043103e-01 2.9553531e-02 1.5552911e-05]]
 --validation--
-sklearn [0 0 0 0 0]
-onnx [0 0 0 0 0]
+sklearn [1 1 1 0 1]
+onnx [1 1 1 0 1]
 

It looks good.

-
-
-

Display the ONNX graph

+ +
+

Display the ONNX graph#

pydot_graph = GetPydotGraph(
     model_onnx.graph, name=model_onnx.graph.name, rankdir="TB",
     node_producer=GetOpNodeProducer(
@@ -693,14 +564,13 @@ 

Display the ONNX graphos.system('dot -O -Gdpi=300 -Tpng validator_classifier.dot') -image = plt.imread("validator_classifier.dot.png") -fig, ax = plt.subplots(figsize=(40, 20)) -ax.imshow(image) -ax.axis('off') +image = plt.imread("validator_classifier.dot.png") +fig, ax = plt.subplots(figsize=(40, 20)) +ax.imshow(image) +ax.axis('off')

-plot custom parser alternative

Out:

-
(-0.5, 2903.5, 4934.5, -0.5)
+plot custom parser alternative
(-0.5, 3414.5, 4934.5, -0.5)
 

Versions used for this example

@@ -711,19 +581,15 @@

Display the ONNX graphprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 2.654 seconds)

\ No newline at end of file
diff --git a/auto_examples/plot_errors_onnxruntime.html b/auto_examples/plot_errors_onnxruntime.html
index 17a101287..27caf93a5 100644
--- a/auto_examples/plot_errors_onnxruntime.html
+++ b/auto_examples/plot_errors_onnxruntime.html
@@ -1,372 +1,291 @@
-Errors with onnxruntime — sklearn-onnx 1.11 documentation
+Errors with onnxruntime - sklearn-onnx 1.14.0 documentation
+ -
-

Errors with onnxruntime

+
+

Errors with onnxruntime#

Many mistakes might happen with onnxruntime. This example looks into several common situations in which onnxruntime does not return the model @@ -384,39 +303,38 @@ from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression try: - from onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument + from onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument except ImportError: # onnxruntime <= 0.5 - InvalidArgument = RuntimeError + InvalidArgument = RuntimeError data = load_iris() -clr = LogisticRegression().fit(data.data[:, :2], data.target) -with open("logreg_iris.onnx", "wb") as f: - f.write( +clr = LogisticRegression().fit(data.data[:, :2], data.target) +with open("logreg_iris.onnx", "wb") as f: + f.write( skl2onnx.to_onnx( - clr, data.data[:, :2].astype(np.float32), + clr, data.data[:, :2].astype(np.float32), target_opset=12).SerializeToString()) -example2 = "logreg_iris.onnx" -sess = rt.InferenceSession(example2) +example2 = "logreg_iris.onnx" +sess = rt.InferenceSession(example2) -input_name = sess.get_inputs()[0].name -output_name = sess.get_outputs()[0].name +input_name = sess.get_inputs()[0].name +output_name = sess.get_outputs()[0].name

The first example fails due to bad types. onnxruntime only expects single floats (4 bytes) and cannot handle any other kind of float.

try:
-    x = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]],
-                 dtype=np.float64)
-    sess.run([output_name], {input_name: x})
+    x = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]],
+                 dtype=np.float64)
+    sess.run([output_name], {input_name: x})
 except Exception as e:
     print("Unexpected type")
     print("{0}: {1}".format(type(e), e))
 
-

Out:

Unexpected type
 <class 'onnxruntime.capi.onnxruntime_pybind11_state.InvalidArgument'>: [ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Unexpected input data type. Actual: (tensor(double)) , expected: (tensor(float))
 
@@ -424,74 +342,70 @@

The model fails to return an output if the name is misspelled.

try:
-    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
-    sess.run(["misspelled"], {input_name: x})
+    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
+    sess.run(["misspelled"], {input_name: x})
 except Exception as e:
     print("Misspelled output name")
     print("{0}: {1}".format(type(e), e))
 
-

Out:

Misspelled output name
 <class 'onnxruntime.capi.onnxruntime_pybind11_state.InvalidArgument'>: [ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Invalid Output Name:misspelled
 

The output name is optional; it can be replaced by None, and onnxruntime will then return all the outputs.

-
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
-res = sess.run(None, {input_name: x})
+
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
+res = sess.run(None, {input_name: x})
 print("All outputs")
-print(res)
+print(res)
 
-

Out:

All outputs
 [array([0, 0, 0], dtype=int64), [{0: 0.9999734163284302, 1: 2.656836477399338e-05, 2: 5.484377840758725e-09}, {0: 0.9999914169311523, 1: 8.446793799521402e-06, 2: 1.7366836857490853e-07}, {0: 0.9999918341636658, 1: 2.6854097541217925e-06, 2: 5.499288818100467e-06}]]
 

The same goes if the input name is misspelled.

try:
-    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
-    sess.run([output_name], {"misspelled": x})
+    x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
+    sess.run([output_name], {"misspelled": x})
 except Exception as e:
     print("Misspelled input name")
     print("{0}: {1}".format(type(e), e))
 
-

Out:

Misspelled input name
 <class 'onnxruntime.capi.onnxruntime_pybind11_state.InvalidArgument'>: [ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Invalid Feed Input Name:misspelled
 

onnxruntime does not necessarily fail if the input dimension is a multiple of the expected input dimension.

-
for x in [
-        np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
-        np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32),
-        np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32),
-        np.array([1.0, 2.0, 3.0], dtype=np.float32),
-        np.array([[1.0, 2.0, 3.0]], dtype=np.float32)]:
+
for x in [
+        np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
+        np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32),
+        np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32),
+        np.array([1.0, 2.0, 3.0], dtype=np.float32),
+        np.array([[1.0, 2.0, 3.0]], dtype=np.float32)]:
     try:
-        r = sess.run([output_name], {input_name: x})
-        print("Shape={0} and predicted labels={1}".format(x.shape, r))
-    except (RuntimeError, InvalidArgument) as e:
-        print("Shape={0} and error={1}".format(x.shape, e))
-
-for x in [
-        np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
-        np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32),
-        np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32),
-        np.array([1.0, 2.0, 3.0], dtype=np.float32),
-        np.array([[1.0, 2.0, 3.0]], dtype=np.float32)]:
+        r = sess.run([output_name], {input_name: x})
+        print("Shape={0} and predicted labels={1}".format(x.shape, r))
+    except (RuntimeError, InvalidArgument) as e:
+        print("Shape={0} and error={1}".format(x.shape, e))
+
+for x in [
+        np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
+        np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32),
+        np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32),
+        np.array([1.0, 2.0, 3.0], dtype=np.float32),
+        np.array([[1.0, 2.0, 3.0]], dtype=np.float32)]:
     try:
-        r = sess.run(None, {input_name: x})
+        r = sess.run(None, {input_name: x})
         print("Shape={0} and predicted probabilities={1}".format(
-            x.shape, r[1]))
-    except (RuntimeError, InvalidArgument) as e:
-        print("Shape={0} and error={1}".format(x.shape, e))
+            x.shape, r[1]))
+    except (RuntimeError, InvalidArgument) as e:
+        print("Shape={0} and error={1}".format(x.shape, e))
 
-

Out:

Shape=(4,) and error=[ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Invalid rank for input: X Got: 1 Expected: 2 Please fix either the inputs or the model.
 Shape=(1, 4) and error=[ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Got invalid dimensions for input: X for the following indices
  index: 1 Got: 4 Expected: 2
@@ -514,44 +428,39 @@
 

It does not fail either if the number of dimensions is higher than expected, but it produces a warning.

-
for x in [
-        np.array([[[1.0, 2.0], [3.0, 4.0]]], dtype=np.float32),
-        np.array([[[1.0, 2.0, 3.0]]], dtype=np.float32),
-        np.array([[[1.0, 2.0]], [[3.0, 4.0]]], dtype=np.float32)]:
+
for x in [
+        np.array([[[1.0, 2.0], [3.0, 4.0]]], dtype=np.float32),
+        np.array([[[1.0, 2.0, 3.0]]], dtype=np.float32),
+        np.array([[[1.0, 2.0]], [[3.0, 4.0]]], dtype=np.float32)]:
     try:
-        r = sess.run([output_name], {input_name: x})
-        print("Shape={0} and predicted labels={1}".format(x.shape, r))
-    except (RuntimeError, InvalidArgument) as e:
-        print("Shape={0} and error={1}".format(x.shape, e))
+        r = sess.run([output_name], {input_name: x})
+        print("Shape={0} and predicted labels={1}".format(x.shape, r))
+    except (RuntimeError, InvalidArgument) as e:
+        print("Shape={0} and error={1}".format(x.shape, e))
 
-

Out:

Shape=(1, 2, 2) and error=[ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Invalid rank for input: X Got: 3 Expected: 2 Please fix either the inputs or the model.
 Shape=(1, 1, 3) and error=[ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Invalid rank for input: X Got: 3 Expected: 2 Please fix either the inputs or the model.
 Shape=(2, 1, 2) and error=[ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Invalid rank for input: X Got: 3 Expected: 2 Please fix either the inputs or the model.
 

Versions used for this example

-
print("numpy:", np.__version__)
-print("scikit-learn:", sklearn.__version__)
-print("onnx: ", onnx.__version__)
-print("onnxruntime: ", rt.__version__)
-print("skl2onnx: ", skl2onnx.__version__)
+
print("numpy:", np.__version__)
+print("scikit-learn:", sklearn.__version__)
+print("onnx: ", onnx.__version__)
+print("onnxruntime: ", rt.__version__)
+print("skl2onnx: ", skl2onnx.__version__)
 
-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 0.064 seconds)

Gallery generated by Sphinx-Gallery

\ No newline at end of file
diff --git a/auto_examples/plot_gpr.html b/auto_examples/plot_gpr.html
index 5fd37e918..773893145 100644
--- a/auto_examples/plot_gpr.html
+++ b/auto_examples/plot_gpr.html
@@ -1,430 +1,304 @@
-Discrepancies with GaussianProcessRegressor: use of double — sklearn-onnx 1.11 documentation
+Discrepancies with GaussianProcessRegressor: use of double - sklearn-onnx 1.14.0 documentation
- - +
+ -
-

Discrepancies with GaussianProcessRegressor: use of double


Discrepancies with GaussianProcessRegressor: use of double#

The GaussianProcessRegressor involves many matrix operations which may require double precision. sklearn-onnx uses single floats by default, but for this particular model it is better to use doubles. Let's see how to create an ONNX file using doubles.
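A minimal sketch of the idea, assuming a fitted GaussianProcessRegressor gpr and using DoubleTensorType instead of FloatTensorType so the whole graph is computed in double precision:

# Sketch (assumption): convert with double precision inputs.
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import DoubleTensorType

initial_type = [('X', DoubleTensorType([None, None]))]
onx64 = convert_sklearn(gpr, initial_types=initial_type, target_opset=12)
with open("gpr_double.onnx", "wb") as f:
    f.write(onx64.SerializeToString())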

- -
-

Train a model

+
+

Train a model#

A very basic example using GaussianProcessRegressor on the Boston dataset.

import pprint
 import numpy
 import sklearn
-from sklearn.datasets import load_boston
+from sklearn.datasets import load_diabetes
 from sklearn.gaussian_process import GaussianProcessRegressor
 from sklearn.gaussian_process.kernels import DotProduct, RBF
 from sklearn.model_selection import train_test_split
@@ -434,22 +308,25 @@ 

Train a modelfrom skl2onnx.common.data_types import FloatTensorType, DoubleTensorType from skl2onnx import convert_sklearn -bost = load_boston() -X, y = bost.data, bost.target +dataset = load_diabetes() +X, y = dataset.data, dataset.target X_train, X_test, y_train, y_test = train_test_split(X, y) gpr = GaussianProcessRegressor(DotProduct() + RBF(), alpha=1.) gpr.fit(X_train, y_train) print(gpr)

-

Out:

-
GaussianProcessRegressor(alpha=1.0,
+
/home/xadupre/github/scikit-learn/sklearn/gaussian_process/kernels.py:430: ConvergenceWarning: The optimal value found for dimension 0 of parameter k1__sigma_0 is close to the specified upper bound 100000.0. Increasing the bound and calling fit again may find a better value.
+  warnings.warn(
+/home/xadupre/github/scikit-learn/sklearn/gaussian_process/kernels.py:420: ConvergenceWarning: The optimal value found for dimension 0 of parameter k2__length_scale is close to the specified lower bound 1e-05. Decreasing the bound and calling fit again may find a better value.
+  warnings.warn(
+GaussianProcessRegressor(alpha=1.0,
                          kernel=DotProduct(sigma_0=1) + RBF(length_scale=1))
 
-
-
+
+

First attempt to convert a model into ONNX#

The documentation suggests the following way to convert a model into ONNX.

initial_type = [('X', FloatTensorType([None, X_train.shape[1]]))]
@@ -464,9 +341,9 @@ 

First attempt to convert a model into ONN print(str(e))

-
-
+
+

Second attempt: variable dimensions#

Unfortunately, even though the conversion went well, the runtime fails to compute the prediction. The previous snippet of code imposes fixed dimension @@ -489,10 +366,9 @@

Second attempt: variable dimensionsprint(pred_onx[0, :10])

-

Out:

-
[12.98711124 23.10666335 22.47744493 21.88389996 17.86881722 21.01216776
- 20.14724588 21.67807254 12.97405285 21.51467395]
-[13.171875]
+
[155.86157227 161.37359619 169.98284912 163.67077637 179.60345459
+ 112.92480469 140.47344971 107.94818115 131.79040527 178.3425293 ]
+[270336.]
 

The differences seem quite important. @@ -504,14 +380,14 @@

Second attempt: variable dimensionsprint('min(Y)-max(Y):', min(y_test), max(y_test))

-

Out:

-
[4.01849596 4.33550733 4.49612665 4.617328   5.72348013]
-min(Y)-max(Y): 6.3 50.0
+
[270230.27374268 270231.13311768 270231.22369385 270233.97131348
+ 270236.3505249 ]
+min(Y)-max(Y): 42.0 346.0
 
-
-
-

Third attempt: use of double

+ +
+

Third attempt: use of double#

The model uses a couple of matrix computations, and the matrices have coefficients with very different orders of magnitude. It is difficult to approximate @@ -534,8 +410,7 @@

Third attempt: use of doubleprint(pred_onx64[0, :10])

-

Out:

-
[12.98711125]
+
[155.86221577]
 

The new differences look much better.

@@ -545,15 +420,13 @@

Third attempt: use of doubleprint('min(Y)-max(Y):', min(y_test), max(y_test))

-

Out:

-
[1.17297745e-08 1.17420953e-08 1.32981626e-08 1.35561606e-08
- 1.73767951e-08]
-min(Y)-max(Y): 6.3 50.0
+
[0.0048411  0.00493731 0.00496565 0.00522617 0.00536676]
+min(Y)-max(Y): 42.0 346.0
 
-
-
-

Size increase

+ +
+

Size increase#

As a result, the ONNX model is almost twice as big because every coefficient is stored as a double and not as a float anymore.

@@ -563,14 +436,13 @@

Size increaseprint("ONNX with doubles:", size64)

-

Out:

-
ONNX with floats: 42944
-ONNX with doubles: 83948
+
ONNX with floats: 29814
+ONNX with doubles: 57694
 
-
-
-

return_std=True

+ +
+

return_std=True#

GaussianProcessRegressor is one model which defines an additional parameter for the predict function. If called with return_std=True, the class returns one more result @@ -601,22 +473,22 @@
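A minimal sketch of how this option is typically requested at conversion time, assuming the skl2onnx return_std converter option for GaussianProcessRegressor:

# Sketch (assumption): ask the converter to also produce the standard deviation.
from sklearn.gaussian_process import GaussianProcessRegressor
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import DoubleTensorType

options = {GaussianProcessRegressor: {'return_std': True}}
onx64_std = convert_sklearn(
    gpr, initial_types=[('X', DoubleTensorType([None, None]))],
    options=options, target_opset=12)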

return_std=Truepprint.pprint(pred_onx64_std)

-

Out:

-
[array([[12.98711125],
-       [23.10666336],
-       [22.47744494],
-       [21.88389997],
-       [17.86881722]]),
- array([1.04323729, 0.77377006, 0.94038624, 0.71791752, 1.03419019])]
+
[array([[155.86221577],
+       [161.37604364],
+       [169.97800802],
+       [163.66931185],
+       [179.59932504]]),
+ array([589.99334399,   0.        ,   0.        , 502.71732585,
+         0.        ])]
 

Let’s compare with scikit-learn prediction.

pprint.pprint(gpr.predict(X_test[:5], return_std=True))
 
-

Out:

-
(array([12.98711124, 23.10666335, 22.47744493, 21.88389996, 17.86881722]),
- array([1.04275194, 0.77250173, 0.94002845, 0.71741222, 1.03399699]))
+
(array([155.86157227, 161.37359619, 169.98284912, 163.67077637,
+       179.60174561]),
+ array([1.01325219, 1.00510439, 1.00620633, 1.0110021 , 1.00937738]))
 

It looks good. Let's do a better check.

@@ -629,8 +501,7 @@

return_std=Trueprint(diff)

-

Out:

-
[0.00105591 0.00119079 0.00126833 0.00140884 0.00160353]
+
[717.44499291 726.51668442 729.78735343 737.89521503 840.48277943]
 

There are some discrepancies, but they seem reasonable.

@@ -642,19 +513,15 @@

return_std=Trueprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 0.753 seconds)

\ No newline at end of file
diff --git a/auto_examples/plot_intermediate_outputs.html b/auto_examples/plot_intermediate_outputs.html
index 06bb7bc17..0ef972178 100644
--- a/auto_examples/plot_intermediate_outputs.html
+++ b/auto_examples/plot_intermediate_outputs.html
@@ -1,425 +1,299 @@
-Walk through intermediate outputs — sklearn-onnx 1.11 documentation
+Walk through intermediate outputs - sklearn-onnx 1.14.0 documentation
- - +
+ -
-

Walk through intermediate outputs

+
+

Walk through intermediate outputs#

We reuse the example Convert a pipeline with ColumnTransformer and walk through intermediate outputs. It is very likely that a converted model gives different outputs or fails due to a custom converter which is not correctly implemented. One option is to look into the output of every node of the ONNX graph.
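A minimal sketch of that approach, assuming an already converted model model_onnx and the helper enumerate_model_node_outputs from skl2onnx.helpers.onnx_helper (used later in this example):

# Sketch (assumption): list every node output that could be exposed.
from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs

for name in enumerate_model_node_outputs(model_onnx):
    print(name)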

- -
-

Create and train a complex pipeline

+
+

Create and train a complex pipeline#

We reuse the pipeline implemented in example Column Transformer with Mixed Types. There is one change because @@ -488,32 +362,51 @@

Create and train a complex pipelineclf.fit(X_train, y_train)

-

Out:

-
Pipeline(steps=[('preprocessor',
-                 ColumnTransformer(transformers=[('num',
-                                                  Pipeline(steps=[('imputer',
-                                                                   SimpleImputer(strategy='median')),
-                                                                  ('scaler',
+
+
Pipeline(steps=[('preprocessor',
+                 ColumnTransformer(transformers=[('num',
+                                                  Pipeline(steps=[('imputer',
+                                                                   SimpleImputer(strategy='median')),
+                                                                  ('scaler',
                                                                    StandardScaler())]),
-                                                  ['age', 'fare']),
-                                                 ('cat',
-                                                  Pipeline(steps=[('onehot',
-                                                                   OneHotEncoder(handle_unknown='ignore'))]),
-                                                  ['embarked', 'sex',
-                                                   'pclass'])])),
-                ('classifier', LogisticRegression())])
-
-
+ ['age', 'fare']), + ('cat', + Pipeline(steps=[('onehot', + OneHotEncoder(handle_unknown='ignore'))]), + ['embarked', 'sex', + 'pclass'])])), + ('classifier', LogisticRegression())])
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
-
-

Define the inputs of the ONNX graph

+
+
+
+

Define the inputs of the ONNX graph#

sklearn-onnx does not know the features used to train the model but it needs to know which feature has which name. We simply reuse the dataframe column definition.
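A minimal sketch of that mapping, assuming a pandas DataFrame df; the dtype-to-tensor-type rule below is an illustrative assumption:

# Sketch (assumption): build one single-column input per dataframe column.
import numpy as np
from skl2onnx.common.data_types import (
    FloatTensorType, Int64TensorType, StringTensorType)


def convert_dataframe_schema(df):
    inputs = []
    for name, dtype in zip(df.columns, df.dtypes):
        if dtype in (np.float32, np.float64):
            t = FloatTensorType([None, 1])
        elif dtype == np.int64:
            t = Int64TensorType([None, 1])
        else:
            t = StringTensorType([None, 1])
        inputs.append((name, t))
    return inputs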

-

Out:

pclass         int64
 name          object
 sex           object
@@ -551,7 +444,6 @@ 

Define the inputs of the ONNX graphpprint.pprint(inputs)

-

Out:

[('pclass', Int64TensorType(shape=[None, 1])),
  ('name', StringTensorType(shape=[None, 1])),
  ('sex', StringTensorType(shape=[None, 1])),
@@ -570,9 +462,9 @@ 

Define the inputs of the ONNX graphMerging single column into vectors is not the most efficient way to compute the prediction. It could be done before converting the pipeline into a graph.

-

-
+
+

Convert the pipeline into ONNX#

try:
     model_onnx = convert_sklearn(clf, 'pipeline_titanic', inputs,
                                  target_opset=12)
@@ -598,9 +490,9 @@ 

Convert the pipeline into ONNXf.write(model_onnx.SerializeToString())

-
-
-

Compare the predictions

+ +
+

Compare the predictions#

As a final step, we need to ensure the converted model produces the same predictions, labels and probabilities. Let's start with scikit-learn.

@@ -608,9 +500,8 @@

Compare the predictionsprint("predict_proba", clf.predict_proba(X_test[:1]))

-

Out:

-
predict [0 0 0 0 0]
-predict_proba [[0.73518679 0.26481321]]
+
predict [0 1 0 1 0]
+predict_proba [[0.76688174 0.23311826]]
 

Predictions with onnxruntime. @@ -637,14 +528,13 @@

Compare the predictionsprint("predict_proba", pred_onx[1][:1])

-

Out:

-
predict [0 0 0 0 0]
-predict_proba [{0: 0.8771912455558777, 1: 0.12280875444412231}]
+
predict [0 1 0 0 0]
+predict_proba [{0: 0.9036112427711487, 1: 0.09638875722885132}]
 
-
-
-

Compute intermediate outputs

+ +
+

Compute intermediate outputs#

Unfortunately, there is actually no way to ask onnxruntime to retrieve the output of intermediate nodes. We need to modify the ONNX model before it is given to onnxruntime. @@ -654,24 +544,19 @@
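A minimal sketch of that modification, assuming the helpers select_model_inputs_outputs and save_onnx_model from skl2onnx.helpers.onnx_helper and an intermediate output name such as 'variable1':

# Sketch (assumption): truncate the graph after one intermediate output
# and run the truncated model with onnxruntime.
import onnxruntime as rt
from skl2onnx.helpers.onnx_helper import (
    select_model_inputs_outputs, save_onnx_model)

num_onnx = select_model_inputs_outputs(model_onnx, 'variable1')
save_onnx_model(num_onnx, "pipeline_titanic_numerical.onnx")

sess = rt.InferenceSession("pipeline_titanic_numerical.onnx")
numX = sess.run(None, inputs)   # inputs: the same feed used for the full model
print("numerical features", numX[0][:1])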

Compute intermediate outputsprint(out)

-

Out:

-
fare_cast
-age_cast
-pclassout
-sexout
+
merged_columns
 embarkedout
+sexout
+pclassout
 concat_result
-merged_columns
 variable
 variable2
 variable1
-variable2_cast
-variable1_cast
 transformed_column
 label
 probability_tensor
-probabilities
 output_label
+probabilities
 output_probability
 
@@ -685,8 +570,7 @@

Compute intermediate outputssave_onnx_model(num_onnx, "pipeline_titanic_numerical.onnx")

-

Out:

-
b'\x08\x07\x12\x08skl2onnx\x1a\x041.11"\x07ai.onnx(\x002\x00:\xae\x04\n^\n\x08variable\x12\tvariable1\x1a\x06Scaler"\x06Scaler*\x15\n\x06offset=U0\xebA=\'%\x05B\xa0\x01\x06*\x14\n\x05scale=\xcc\xe5\x9f==G.\xa0<\xa0\x01\x06:\nai.onnx.ml\n}\n\x0emerged_columns\x12\x08variable\x1a\x07Imputer"\x07Imputer*#\n\x14imputed_value_floats=\x00\x00\xe0A=gDgA\xa0\x01\x06*\x1e\n\x14replaced_value_float\x15\x00\x00\xc0\x7f\xa0\x01\x01:\nai.onnx.ml\nD\n\x08age_cast\n\tfare_cast\x12\x0emerged_columns\x1a\x06Concat"\x06Concat*\x0b\n\x04axis\x18\x01\xa0\x01\x02:\x00\n(\n\x03age\x12\x08age_cast\x1a\x04Cast"\x04Cast*\t\n\x02to\x18\x01\xa0\x01\x02:\x00\n+\n\x04fare\x12\tfare_cast\x1a\x05Cast1"\x04Cast*\t\n\x02to\x18\x01\xa0\x01\x02:\x00\x12\x10pipeline_titanic*\x1f\x08\x02\x10\x07:\x0b\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\tB\x0cshape_tensorZ\x16\n\x06pclass\x12\x0c\n\n\x08\x08\x12\x06\n\x00\n\x02\x08\x01Z\x13\n\x03sex\x12\x0c\n\n\x08\x08\x12\x06\n\x00\n\x02\x08\x01Z\x13\n\x03age\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\x01Z\x14\n\x04fare\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\x01Z\x18\n\x08embarked\x12\x0c\n\n\x08\x08\x12\x06\n\x00\n\x02\x08\x01b\x0b\n\tvariable1B\x04\n\x00\x10\x0bB\x0e\n\nai.onnx.ml\x10\x01'
+
b'\x08\x07\x12\x08skl2onnx\x1a\x061.14.0"\x07ai.onnx(\x002\x00:\xcd\x03\n:\n\x03age\n\x04fare\x12\x0emerged_columns\x1a\x06Concat"\x06Concat*\x0b\n\x04axis\x18\x01\xa0\x01\x02:\x00\n}\n\x0emerged_columns\x12\x08variable\x1a\x07Imputer"\x07Imputer*#\n\x14imputed_value_floats=\x00\x00\xe0A=\xcdLgA\xa0\x01\x06*\x1e\n\x14replaced_value_float\x15\x00\x00\xc0\x7f\xa0\x01\x01:\nai.onnx.ml\n^\n\x08variable\x12\tvariable1\x1a\x06Scaler"\x06Scaler*\x15\n\x06offset=l\xde\xebA=J\xad\x07B\xa0\x01\x06*\x14\n\x05scale=\x88w\x9b==\x98\xca\x97<\xa0\x01\x06:\nai.onnx.ml\x12\x10pipeline_titanic*\x1f\x08\x02\x10\x07:\x0b\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\tB\x0cshape_tensorZ\x16\n\x06pclass\x12\x0c\n\n\x08\x08\x12\x06\n\x00\n\x02\x08\x01Z\x13\n\x03sex\x12\x0c\n\n\x08\x08\x12\x06\n\x00\n\x02\x08\x01Z\x13\n\x03age\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\x01Z\x14\n\x04fare\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\x01Z\x18\n\x08embarked\x12\x0c\n\n\x08\x08\x12\x06\n\x00\n\x02\x08\x01b\x0b\n\tvariable1B\x0e\n\nai.onnx.ml\x10\x01B\x04\n\x00\x10\x0b'
 

Let’s compute the numerical features.

@@ -695,8 +579,7 @@

Compute intermediate outputsprint("numerical features", numX[0][:1])

-

Out:

-
numerical features [[-0.7337959  -0.49345303]]
+
numerical features [[ 0.19102357 -0.4848954 ]]
 

We do the same for the textual features.

@@ -708,48 +591,37 @@

Compute intermediate outputsprint("textual features", numT[0][:1])

-

Out:

ir_version: 7
 producer_name: "skl2onnx"
-producer_version: "1.11"
+producer_version: "1.14.0"
 domain: "ai.onnx"
 model_version: 0
 doc_string: ""
 graph {
-  node {
-    input: "fare"
-    output: "fare_cast"
-    name: "Cast1"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 1
-      type: INT
-    }
-    domain: ""
-  }
   node {
     input: "age"
-    output: "age_cast"
-    name: "Cast"
-    op_type: "Cast"
+    input: "fare"
+    output: "merged_columns"
+    name: "Concat"
+    op_type: "Concat"
     attribute {
-      name: "to"
+      name: "axis"
       i: 1
       type: INT
     }
     domain: ""
   }
   node {
-    input: "pclass"
-    output: "pclassout"
-    name: "OneHotEncoder2"
+    input: "embarked"
+    output: "embarkedout"
+    name: "OneHotEncoder"
     op_type: "OneHotEncoder"
     attribute {
       name: "cats_strings"
-      strings: "1"
-      strings: "2"
-      strings: "3"
+      strings: "C"
+      strings: "Q"
+      strings: "S"
+      strings: "missing"
       type: STRINGS
     }
     attribute {
@@ -778,16 +650,15 @@ 

Compute intermediate outputsCompute intermediate outputsCompute intermediate outputsCompute intermediate outputsCompute intermediate outputsCompute intermediate outputsCompute intermediate outputsCompute intermediate outputs

-
-
-

Display the sub-ONNX graph

+ +
+

Display the sub-ONNX graph#

Finally, let’s see both subgraphs. First, numerical pipeline.

pydot_graph = GetPydotGraph(
     num_onnx.graph, name=num_onnx.graph.name, rankdir="TB",
@@ -1136,14 +970,13 @@ 

Display the sub-ONNX graphos.system('dot -O -Gdpi=300 -Tpng pipeline_titanic_num.dot') -image = plt.imread("pipeline_titanic_num.dot.png") -fig, ax = plt.subplots(figsize=(40, 20)) -ax.imshow(image) -ax.axis('off') +image = plt.imread("pipeline_titanic_num.dot.png") +fig, ax = plt.subplots(figsize=(40, 20)) +ax.imshow(image) +ax.axis('off')

-plot intermediate outputs

Out:

-
(-0.5, 4501.5, 1033.5, -0.5)
+plot intermediate outputs
(-0.5, 1229.5, 2558.5, -0.5)
 

Then textual pipeline.

@@ -1155,14 +988,13 @@

Display the sub-ONNX graphos.system('dot -O -Gdpi=300 -Tpng pipeline_titanic_text.dot') -image = plt.imread("pipeline_titanic_text.dot.png") -fig, ax = plt.subplots(figsize=(40, 20)) -ax.imshow(image) -ax.axis('off') +image = plt.imread("pipeline_titanic_text.dot.png") +fig, ax = plt.subplots(figsize=(40, 20)) +ax.imshow(image) +ax.axis('off')

-plot intermediate outputs

Out:

-
(-0.5, 7086.5, 1121.5, -0.5)
+plot intermediate outputs
(-0.5, 5630.5, 2735.5, -0.5)
 

Versions used for this example

@@ -1173,19 +1005,15 @@

Display the sub-ONNX graphprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 3.682 seconds)

\ No newline at end of file
diff --git a/auto_examples/plot_investigate_pipeline.html b/auto_examples/plot_investigate_pipeline.html
index d4fd644bd..765de1c70 100644
--- a/auto_examples/plot_investigate_pipeline.html
+++ b/auto_examples/plot_investigate_pipeline.html
@@ -1,409 +1,295 @@
-Investigate a pipeline — sklearn-onnx 1.11 documentation
+Investigate a pipeline - sklearn-onnx 1.14.0 documentation
- - +
+ -
-

Investigate a pipeline

+
+

Investigate a pipeline#

The following example shows how to look into a converted model and easily find errors at every step of the pipeline.

- -
-

Create a pipeline

+
+

Create a pipeline#

We reuse the pipeline implemented in example Pipelining: chaining a PCA and a logistic regression. There is one change because @@ -437,8 +323,7 @@

Create a pipelinepipe.fit(X_digits, y_digits)

-

Out:

-
C:\xadupre\github\scikit-learn\sklearn\linear_model\_logistic.py:444: ConvergenceWarning: lbfgs failed to converge (status=1):
+
/home/xadupre/github/scikit-learn/sklearn/linear_model/_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):
 STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
 
 Increase the number of iterations (max_iter) or scale the data as shown in:
@@ -446,13 +331,15 @@ 

Create a pipeline +
Pipeline(steps=[('pca', PCA()), ('logistic', LogisticRegression())])
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.

-
-

Conversion to ONNX

+
+
+
+

Conversion to ONNX#

initial_types = [('input', FloatTensorType((None, X_digits.shape[1])))]
 model_onnx = convert_sklearn(pipe, initial_types=initial_types,
                              target_opset=12)
@@ -466,26 +353,25 @@ 

Conversion to ONNXprint(df.values)

-

Out:

skl predict_proba
-[[9.99998536e-01 5.99063532e-19 3.48549155e-10 1.55765807e-08
-  3.32559909e-10 1.21314723e-06 3.98960115e-08 1.22513893e-07
-  2.23871287e-08 4.98148626e-08]
- [1.47648497e-14 9.99999301e-01 1.05811969e-10 7.49298734e-13
-  2.48627456e-07 8.75686305e-12 5.39025172e-11 2.95899962e-11
-  4.50528996e-07 1.30607533e-13]]
+[[9.99998536e-01 5.99063158e-19 3.48548953e-10 1.55765726e-08
+  3.32559745e-10 1.21314653e-06 3.98959930e-08 1.22513839e-07
+  2.23871272e-08 4.98148509e-08]
+ [1.47648437e-14 9.99999301e-01 1.05811967e-10 7.49298733e-13
+  2.48627417e-07 8.75686484e-12 5.39025135e-11 2.95899938e-11
+  4.50528833e-07 1.30607478e-13]]
 onnx predict_proba
 [[9.99998569e-01 5.99062501e-19 3.48550355e-10 1.55766493e-08
-  3.32561811e-10 1.21315361e-06 3.98961184e-08 1.22514706e-07
+  3.32561811e-10 1.21315134e-06 3.98961930e-08 1.22514706e-07
   2.23872494e-08 4.98151529e-08]
  [1.47648956e-14 9.99999285e-01 1.05811991e-10 7.49297488e-13
-  2.48627885e-07 8.75685548e-12 5.39024415e-11 2.95900075e-11
-  4.50528631e-07 1.30607344e-13]]
+  2.48627885e-07 8.75685548e-12 5.39024415e-11 2.95899520e-11
+  4.50529058e-07 1.30607344e-13]]
 
-
-
-

Intermediate steps

+ +
+

Intermediate steps#

Let's imagine the final output is wrong and we need to look into each component of the pipeline to find which one is failing. The following method modifies the scikit-learn @@ -510,7 +396,6 @@
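A minimal sketch of that idea, assuming the helper collect_intermediate_steps from skl2onnx.helpers; the dictionary keys used below are assumptions based on this example:

# Sketch (assumption): convert each step of the pipeline separately and run
# the original pipeline once so every step records its inputs and outputs.
from skl2onnx.helpers import collect_intermediate_steps

steps = collect_intermediate_steps(pipe, "pipeline", initial_types)
pipe.predict_proba(X_digits[:2])
for step in steps:
    # each entry is assumed to expose the fitted step and its ONNX translation
    print(type(step['model']).__name__,
          len(step['onnx_step'].SerializeToString()), "bytes")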

Intermediate stepsprint(onnx_outputs)

-

Out:

step 1 <class 'sklearn.decomposition._pca.PCA'>
 skl outputs
 {'transform': array([[-9.78697129e+00,  7.22639567e+00, -2.16935601e+01,
@@ -533,7 +418,7 @@ 

Intermediate stepsIntermediate stepsIntermediate stepsIntermediate steps -

Pickle

+ +
+

Pickle#

Each steps is a separate model in the pipeline. It can be pickle independetly from the others. Attribute _debug contains all the information @@ -649,13 +534,12 @@

Pickleprint(restored['model'].predict_proba(restored['data_input']['predict_proba']))

-

Out:

-
[[9.99998536e-01 5.99063532e-19 3.48549155e-10 1.55765807e-08
-  3.32559909e-10 1.21314723e-06 3.98960115e-08 1.22513893e-07
-  2.23871287e-08 4.98148626e-08]
- [1.47648497e-14 9.99999301e-01 1.05811969e-10 7.49298734e-13
-  2.48627456e-07 8.75686305e-12 5.39025172e-11 2.95899962e-11
-  4.50528996e-07 1.30607533e-13]]
+
[[9.99998536e-01 5.99063158e-19 3.48548953e-10 1.55765726e-08
+  3.32559745e-10 1.21314653e-06 3.98959930e-08 1.22513839e-07
+  2.23871272e-08 4.98148509e-08]
+ [1.47648437e-14 9.99999301e-01 1.05811967e-10 7.49298733e-13
+  2.48627417e-07 8.75686484e-12 5.39025135e-11 2.95899938e-11
+  4.50528833e-07 1.30607478e-13]]
 

Versions used for this example

@@ -666,19 +550,15 @@

Pickleprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 0.323 seconds)

\ No newline at end of file
diff --git a/auto_examples/plot_logging.html b/auto_examples/plot_logging.html
index 19e316d74..8c16f6241 100644
--- a/auto_examples/plot_logging.html
+++ b/auto_examples/plot_logging.html
@@ -1,411 +1,297 @@
-Logging, verbose — sklearn-onnx 1.11 documentation
+Logging, verbose - sklearn-onnx 1.14.0 documentation
- -
- - +
+ -
-

Logging, verbose

+
+

Logging, verbose#

The conversion of a pipeline fails if it contains an object without any associated converter. It may also fail if one of the objects is mapped by a custom converter. If the error message is not explicit enough, it is possible to enable logging.
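A minimal sketch of how logging can be enabled, assuming the logger name 'skl2onnx' that appears in the output below:

# Sketch (assumption): turn on detailed logging for the skl2onnx converter.
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('skl2onnx')
logger.setLevel(logging.DEBUG)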

- -
-

Train a model

+
+

Train a model#

A very basic example using a decision tree and the iris dataset.

import logging
@@ -428,13 +314,12 @@ 

Train a modelprint(clr)

-

Out:

DecisionTreeClassifier()
 
-
-
+
+

Convert a model into ONNX#

initial_type = [('float_input', FloatTensorType([None, 4]))]
 onx = convert_sklearn(clr, initial_types=initial_type,
                       target_opset=12)
@@ -448,14 +333,13 @@ 

Convert a model into ONNXprint(pred_onx)

-

Out:

-
[0 2 0 1 0 2 2 1 2 0 0 0 2 1 1 2 2 2 2 1 2 0 0 1 1 0 0 1 0 0 1 1 1 0 0 2 1
- 2]
+
[1 0 1 1 2 0 2 0 2 0 1 1 1 0 2 0 2 1 2 1 2 0 0 0 2 0 0 0 2 1 2 1 1 0 2 2 2
+ 1]
 
-
-
+
+

Conversion with parameter verbose#

verbose is a parameter which prints messages on the standard output. It tells which converter is called. verbose=1 usually shows what skl2onnx is doing to convert a pipeline. verbose=2+ @@ -463,7 +347,6 @@
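A minimal sketch of the call with the verbose parameter, assuming the same model and initial_type as above:

# Sketch (assumption): print what the converter does while converting.
onx = convert_sklearn(clr, initial_types=initial_type,
                      target_opset=12, verbose=1)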

Conversion with parameter verbose

-

Out:

-
-
-

Conversion with logging

+ +
+

Conversion with logging#

This is very detailed logging. It shows which operators or variables (outputs of converters) are processed and which nodes are created… This information may be useful when a custom converter is being @@ -884,7 +677,6 @@

Conversion with loggingconvert_sklearn(clr, initial_types=initial_type, target_opset=12)

-

Out:

DEBUG:skl2onnx:[Var] +Variable('float_input', 'float_input', type=FloatTensorType(shape=[None, 4]))
 DEBUG:skl2onnx:[Var] update is_root=True for Variable('float_input', 'float_input', type=FloatTensorType(shape=[None, 4]))
 DEBUG:skl2onnx:[parsing] found alias='SklearnDecisionTreeClassifier' for type=<class 'sklearn.tree._classes.DecisionTreeClassifier'>.
@@ -937,7 +729,7 @@ 

Conversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingConversion with loggingconvert_sklearn(clr, initial_types=initial_type, target_opset=12)

-

Out:

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 0.158 seconds)

\ No newline at end of file
diff --git a/auto_examples/plot_metadata.html b/auto_examples/plot_metadata.html
index ed5f8abc0..c25fc63ba 100644
--- a/auto_examples/plot_metadata.html
+++ b/auto_examples/plot_metadata.html
@@ -1,371 +1,291 @@
-Metadata — sklearn-onnx 1.11 documentation
+Metadata - sklearn-onnx 1.14.0 documentation
- - +
+ -
-

Metadata

+
+

Metadata#

ONNX format contains metadata related to how the model was produced. It is useful when the model is deployed to production to keep track of which @@ -377,24 +297,23 @@ import onnxruntime import sklearn import numpy -from onnxruntime import InferenceSession +from onnxruntime import InferenceSession import onnx -from onnxruntime.datasets import get_example +from onnxruntime.datasets import get_example -example = get_example("logreg_iris.onnx") +example = get_example("logreg_iris.onnx") -model = onnx.load(example) +model = onnx.load(example) -print("doc_string={}".format(model.doc_string)) -print("domain={}".format(model.domain)) -print("ir_version={}".format(model.ir_version)) +print("doc_string={}".format(model.doc_string)) +print("domain={}".format(model.domain)) +print("ir_version={}".format(model.ir_version)) print("metadata_props={}".format(model.metadata_props)) -print("model_version={}".format(model.model_version)) -print("producer_name={}".format(model.producer_name)) -print("producer_version={}".format(model.producer_version)) +print("model_version={}".format(model.model_version)) +print("producer_name={}".format(model.producer_name)) +print("producer_version={}".format(model.producer_version))

-

Out:

doc_string=
 domain=onnxml
 ir_version=3
@@ -405,18 +324,17 @@
 

With ONNX Runtime:

-
sess = InferenceSession(example)
-meta = sess.get_modelmeta()
-
-print("custom_metadata_map={}".format(meta.custom_metadata_map))
-print("description={}".format(meta.description))
-print("domain={}".format(meta.domain))
-print("graph_name={}".format(meta.graph_name))
-print("producer_name={}".format(meta.producer_name))
-print("version={}".format(meta.version))
+
sess = InferenceSession(example)
+meta = sess.get_modelmeta()
+
+print("custom_metadata_map={}".format(meta.custom_metadata_map))
+print("description={}".format(meta.description))
+print("domain={}".format(meta.domain))
+print("graph_name={}".format(meta.graph_name))
+print("producer_name={}".format(meta.producer_name))
+print("version={}".format(meta.version))
 
-

Out:

custom_metadata_map={}
 description=
 domain=onnxml
@@ -426,26 +344,22 @@
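Metadata can also be written before a model is saved; a minimal sketch with onnx.helper.set_model_props, where the property names are illustrative assumptions:

# Sketch (assumption): attach custom metadata to a model before saving it.
import onnx
from onnx import helper

model = onnx.load("logreg_iris.onnx")
model.doc_string = "Logistic regression trained on the iris dataset."
helper.set_model_props(model, {"training_data": "iris",
                               "author": "example"})
onnx.save(model, "logreg_iris_with_metadata.onnx")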
 

Versions used for this example

-
print("numpy:", numpy.__version__)
-print("scikit-learn:", sklearn.__version__)
-print("onnx: ", onnx.__version__)
-print("onnxruntime: ", onnxruntime.__version__)
-print("skl2onnx: ", skl2onnx.__version__)
+
print("numpy:", numpy.__version__)
+print("scikit-learn:", sklearn.__version__)
+print("onnx: ", onnx.__version__)
+print("onnxruntime: ", onnxruntime.__version__)
+print("skl2onnx: ", skl2onnx.__version__)
 
-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 0.079 seconds)

Gallery generated by Sphinx-Gallery

\ No newline at end of file
diff --git a/auto_examples/plot_nmf.html b/auto_examples/plot_nmf.html
index 3eb8da156..eff299d52 100644
--- a/auto_examples/plot_nmf.html
+++ b/auto_examples/plot_nmf.html
@@ -1,390 +1,294 @@
-Custom Operator for NMF Decomposition — sklearn-onnx 1.11 documentation
+Custom Operator for NMF Decomposition - sklearn-onnx 1.14.0 documentation
- - +
+ -
-

Custom Operator for NMF Decomposition

+
+

Custom Operator for NMF Decomposition#

NMF factorizes an input matrix M into two matrices W, H of rank k so that WH \sim M. M=(m_{ij}) may be a binary matrix where i is a user and j a product he bought. The prediction function depends on whether or not the user needs a recommendation for an existing user or a new user. @@ -392,19 +296,13 @@

The second case is more complex as it theoretically requires the estimation of a new matrix W with a gradient descent.
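For the first case (an existing user), the prediction is a dot product between one row of W and one column of H; a minimal sketch, assuming W and H come from a fitted NMF:

# Sketch (assumption): score user i on product j with the factorized matrices.
import numpy as np


def predict_score(W, H, i, j):
    # W: (n_users, k), H: (k, n_products)
    return float(W[i] @ H[:, j])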

- -
-

Building a simple model

+
+

Building a simple model#

import os
 import skl2onnx
 import onnxruntime
 import sklearn
-from sklearn.decomposition import NMF
+from sklearn.decomposition import NMF
 import numpy as np
 import matplotlib.pyplot as plt
 from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer
@@ -419,7 +317,7 @@ 

Building a simple model[1, 0, 0, 0], [1, 0, 0, 0]], dtype=np.float64) mat[:mat.shape[1], :] += np.identity(mat.shape[1]) -mod = NMF(n_components=2) +mod = NMF(n_components=2) W = mod.fit_transform(mat) H = mod.components_ pred = mod.inverse_transform(W) @@ -433,9 +331,8 @@

Building a simple modelprint(exp)

-

Out:

original predictions
-[(0, 0, 1.8940619439633473), (0, 1, 0.3072432913109815), (0, 2, 0.1091000464503179), (0, 3, 0.3072432913109815), (1, 0, 1.1066037222104155), (1, 1, 0.19083096278248987), (1, 2, 0.0), (1, 3, 0.19083096278248987), (2, 0, 1.014668907902116), (2, 1, 0.0), (2, 2, 0.9848932612757917), (2, 3, 0.0), (3, 0, 1.1066037222104155), (3, 1, 0.19083096278248987), (3, 2, 0.0), (3, 3, 0.19083096278248987), (4, 0, 0.9470309719816736), (4, 1, 0.15362164565549075), (4, 2, 0.05455002322515895), (4, 3, 0.15362164565549075)]
+[(0, 0, 1.8940573361824338), (0, 1, 0.30724412295249387), (0, 2, 0.30724412295249387), (0, 3, 0.10910977984398254), (1, 0, 1.106606957298055), (1, 1, 0.19083366185515177), (1, 2, 0.19083366185515177), (1, 3, 0.0), (2, 0, 1.106606957298055), (2, 1, 0.19083366185515177), (2, 2, 0.19083366185515177), (2, 3, 0.0), (3, 0, 1.0146708954543895), (3, 1, 0.0), (3, 2, 0.0), (3, 3, 0.9848905236525014), (4, 0, 0.9470286680912169), (4, 1, 0.15362206147624693), (4, 2, 0.15362206147624693), (4, 3, 0.05455488992199127)]
 

Let’s rewrite the prediction in a way it is closer @@ -452,13 +349,12 @@

Building a simple modelprint(got)

-

Out:

-
[(0, 0, 1.8940619439633473), (0, 1, 0.3072432913109815), (0, 2, 0.1091000464503179), (0, 3, 0.3072432913109815), (1, 0, 1.1066037222104155), (1, 1, 0.19083096278248987), (1, 2, 0.0), (1, 3, 0.19083096278248987), (2, 0, 1.014668907902116), (2, 1, 0.0), (2, 2, 0.9848932612757917), (2, 3, 0.0), (3, 0, 1.1066037222104155), (3, 1, 0.19083096278248987), (3, 2, 0.0), (3, 3, 0.19083096278248987), (4, 0, 0.9470309719816736), (4, 1, 0.15362164565549075), (4, 2, 0.05455002322515895), (4, 3, 0.15362164565549075)]
+
[(0, 0, 1.8940573361824338), (0, 1, 0.30724412295249387), (0, 2, 0.30724412295249387), (0, 3, 0.10910977984398254), (1, 0, 1.106606957298055), (1, 1, 0.19083366185515177), (1, 2, 0.19083366185515177), (1, 3, 0.0), (2, 0, 1.106606957298055), (2, 1, 0.19083366185515177), (2, 2, 0.19083366185515177), (2, 3, 0.0), (3, 0, 1.0146708954543895), (3, 1, 0.0), (3, 2, 0.0), (3, 3, 0.9848905236525014), (4, 0, 0.9470286680912169), (4, 1, 0.15362206147624693), (4, 2, 0.15362206147624693), (4, 3, 0.05455488992199127)]
 
-
-
-

Conversion into ONNX

+ +
+

Conversion into ONNX#

There is no implemented converter for NMF as the function we plan to convert is not a transformer or a predictor. @@ -490,10 +386,9 @@

Conversion into ONNXprint(model_onnx)

-

Out:

-

Out:

-
[(0, 0, 1.8940619), (0, 1, 0.3072433), (0, 2, 0.109100044), (0, 3, 0.3072433), (1, 0, 1.1066036), (1, 1, 0.19083095), (1, 2, 0.0), (1, 3, 0.19083095), (2, 0, 1.014669), (2, 1, 0.0), (2, 2, 0.98489326), (2, 3, 0.0), (3, 0, 1.1066036), (3, 1, 0.19083095), (3, 2, 0.0), (3, 3, 0.19083095), (4, 0, 0.94703096), (4, 1, 0.15362164), (4, 2, 0.054550022), (4, 3, 0.15362164)]
+
[(0, 0, 1.8940574), (0, 1, 0.30724412), (0, 2, 0.30724412), (0, 3, 0.10910978), (1, 0, 1.106607), (1, 1, 0.19083367), (1, 2, 0.19083367), (1, 3, 0.0), (2, 0, 1.106607), (2, 1, 0.19083367), (2, 2, 0.19083367), (2, 3, 0.0), (3, 0, 1.014671), (3, 1, 0.0), (3, 2, 0.0), (3, 3, 0.9848906), (4, 0, 0.9470287), (4, 1, 0.15362206), (4, 2, 0.15362206), (4, 3, 0.05455489)]
 

The ONNX graph looks like the following.

@@ -641,13 +535,12 @@

Conversion into ONNXrankdir="TB", node_producer=GetOpNodeProducer("docstring")) pydot_graph.write_dot("graph_nmf.dot") os.system('dot -O -Tpng graph_nmf.dot') -image = plt.imread("graph_nmf.dot.png") -plt.imshow(image) -plt.axis('off') +image = plt.imread("graph_nmf.dot.png") +plt.imshow(image) +plt.axis('off')

-plot nmf

Out:

-
(-0.5, 1303.5, 846.5, -0.5)
+plot nmf
(-0.5, 1654.5, 846.5, -0.5)
 

Versions used for this example

@@ -658,19 +551,15 @@

Conversion into ONNXprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 0.428 seconds)

\ No newline at end of file
diff --git a/auto_examples/plot_onnx_operators.html b/auto_examples/plot_onnx_operators.html
index ccae9ba14..c78c91e4e 100644
--- a/auto_examples/plot_onnx_operators.html
+++ b/auto_examples/plot_onnx_operators.html
@@ -1,397 +1,291 @@
-Play with ONNX operators — sklearn-onnx 1.11 documentation
+Play with ONNX operators - sklearn-onnx 1.14.0 documentation
- - +
+ -
-

Play with ONNX operators

+
+

Play with ONNX operators#

ONNX aims at describing most of the machine learning models implemented in scikit-learn but it does not necessarily describe the prediction function the same way scikit-learn does. @@ -405,16 +299,8 @@ graph: PythonAPIOverview.md. But it is quite verbose and makes it difficult to describe big graphs. sklearn-onnx implements a nicer way to test ONNX operators.

- -
-

ONNX Python API

+
+

ONNX Python API#

Let’s try the example given by ONNX documentation: ONNX Model Using Helper Functions. It relies on protobuf whose definition can be found @@ -462,9 +348,8 @@
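The body of that example is elided by the diff; here is a minimal standalone sketch of the same helper-function API, building a single Pad node (names and shapes are illustrative):

# Sketch (assumption): build and check a one-node model with onnx.helper.
import onnx
from onnx import helper, TensorProto

X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [3, 2])
pads = helper.make_tensor_value_info('pads', TensorProto.INT64, [4])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [3, 4])

node = helper.make_node('Pad', ['X', 'pads'], ['Y'], mode='constant')
graph = helper.make_graph([node], 'pad-example', [X, pads], [Y])
model = helper.make_model(graph, producer_name='example')

onnx.checker.check_model(model)
print('The model is checked!')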

ONNX Python APIprint('The model is checked!')

-

Out:

-
-
-

Same example with sklearn-onnx

+ +
+

Same example with sklearn-onnx#

Every operator has its own class in sklearn-onnx. The list is dynamically created based on the installed onnx package.

@@ -546,11 +431,10 @@

Same example with sklearn-onnxprint('The model is checked!')

-

Out:

-
-
-

Multiple operators

+ +
+

Multiple operators#

Let’s use the second example from the documentation.

# Preprocessing: create a model with two nodes, Y's shape is unknown
 node1 = helper.make_node('Transpose', ['X'], ['Y'], perm=[1, 0, 2])
@@ -676,7 +560,6 @@ 

print(Y)

Out:

{'Tr_transposed0': array([[[ 0.,  1.,  2.,  3.],
         [ 4.,  5.,  6.,  7.],
         [ 8.,  9., 10., 11.]],
Display the ONNX graph#

pydot_graph = GetPydotGraph(
     model_def.graph, name=model_def.graph.name, rankdir="TB",
     node_producer=GetOpNodeProducer("docstring", color="yellow",
@@ -697,14 +580,13 @@ 

os.system('dot -O -Gdpi=300 -Tpng pipeline_transpose2x.dot')
image = plt.imread("pipeline_transpose2x.dot.png")
fig, ax = plt.subplots(figsize=(40, 20))
ax.imshow(image)
ax.axis('off')

plot onnx operators

Out:

-(-0.5, 1266.5, 1707.5, -0.5)
+(-0.5, 1524.5, 1707.5, -0.5)
 

Versions used for this example

print("skl2onnx: ", skl2onnx.__version__)

Out:

-numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0

Total running time of the script: ( 0 minutes 1.104 seconds)

diff --git a/auto_examples/plot_pipeline.html b/auto_examples/plot_pipeline.html
index b9895a1a3..be83d6fd7 100644
--- a/auto_examples/plot_pipeline.html
+++ b/auto_examples/plot_pipeline.html
@@ -1,400 +1,298 @@
-Draw a pipeline — sklearn-onnx 1.11 documentation
+Draw a pipeline - sklearn-onnx 1.14.0 documentation

Draw a pipeline

+
+

Draw a pipeline#

There is no other way to look into a model stored in ONNX format than looking into its nodes with onnx. This example demonstrates how to draw a model and how to retrieve it in JSON format.
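A minimal sketch of what "looking into its nodes" means, assuming some ONNX file is available (the file name below is only a placeholder):

import onnx

model = onnx.load("model.onnx")  # placeholder file name
print(model.producer_name, model.ir_version)
for node in model.graph.node:
    # every node exposes its operator type, inputs and outputs
    print(node.op_type, list(node.input), "->", list(node.output))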

Retrieve a model in JSON format#

That is the simplest way.

import skl2onnx
 import onnxruntime
@@ -422,10 +320,9 @@ 

f.write(model.SerializeToString())

Out:

ir_version: 7
 producer_name: "skl2onnx"
-producer_version: "1.11"
+producer_version: "1.14.0"
 domain: "ai.onnx"
 model_version: 0
 graph {
@@ -500,9 +397,9 @@ 

Draw a model with ONNX#

We use net_drawer.py included in the onnx package. We use onnx to load the model and draw its graph.
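The drawing step essentially follows the pattern below; a hedged sketch assuming pydot and the graphviz dot executable are installed (the file names are placeholders):

import os
import onnx
from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer

model = onnx.load("model.onnx")  # placeholder file name
pydot_graph = GetPydotGraph(
    model.graph, name=model.graph.name, rankdir="TB",
    node_producer=GetOpNodeProducer("docstring"))
pydot_graph.write_dot("graph.dot")
os.system('dot -O -Tpng graph.dot')  # writes graph.dot.png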

os.system('dot -O -Tpng graph.dot')

Out:

0
 

Which we display…

image = plt.imread("graph.dot.png")
plt.imshow(image)
plt.axis('off')

plot pipeline

Out:

-(-0.5, 389.5, 602.5, -0.5)
+(-0.5, 431.5, 602.5, -0.5)
 

Versions used for this example

print("skl2onnx: ", skl2onnx.__version__)

Out:

-numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
+numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0

Total running time of the script: ( 0 minutes 0.801 seconds)

diff --git a/auto_examples/plot_pipeline_lightgbm.html b/auto_examples/plot_pipeline_lightgbm.html
index 5e5347660..1404745b4 100644
--- a/auto_examples/plot_pipeline_lightgbm.html
+++ b/auto_examples/plot_pipeline_lightgbm.html
@@ -1,419 +1,299 @@
-Convert a pipeline with a LightGbm model — sklearn-onnx 1.11 documentation
+Convert a pipeline with a LightGbm model - sklearn-onnx 1.14.0 documentation

Convert a pipeline with a LightGbm model

+
+

Convert a pipeline with a LightGbm model#

sklearn-onnx only converts scikit-learn models into ONNX, but many libraries implement the scikit-learn API so that their models can be included in a scikit-learn pipeline. This example considers a pipeline including a LightGBM model. sklearn-onnx can convert the whole pipeline as long as it knows the converter associated with a LGBMClassifier. Let's see how to do it.

Train a LightGBM classifier#

import lightgbm
 import onnxmltools
 import skl2onnx
@@ -449,14 +329,15 @@ 

pipe.fit(X, y)

Out:

Pipeline(steps=[('scaler', StandardScaler()),
                ('lgbm', LGBMClassifier(n_estimators=3))])

Register the converter for LGBMClassifier#

The converter is implemented in onnxmltools: onnxmltools…LightGbm.py. It is registered together with a shape calculator so that skl2onnx knows how to handle a LGBMClassifier inside a pipeline.

    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})
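For reference, the registration typically looks like the sketch below; the onnxmltools import path and the shape calculator are the ones commonly used for this purpose and may differ across versions:

from lightgbm import LGBMClassifier
from onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm
from skl2onnx import update_registered_converter
from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes

update_registered_converter(
    LGBMClassifier, 'LightGbmLGBMClassifier',
    calculate_linear_classifier_output_shapes, convert_lightgbm,
    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})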

Convert again#

model_onnx = convert_sklearn(
     pipe, 'pipeline_lightgbm',
     [('input', FloatTensorType([None, 2]))],
@@ -482,17 +363,16 @@ 

f.write(model_onnx.SerializeToString())

Compare the predictions#

Predictions with LightGbm.

print("predict", pipe.predict(X[:5]))
 print("predict_proba", pipe.predict_proba(X[:1]))
 
-

Out:

-
predict [0 0 2 1 2]
-predict_proba [[0.52589117 0.23688316 0.23722567]]
+
predict [0 2 1 2 1]
+predict_proba [[0.51995794 0.24549283 0.23454923]]
 

Predictions with onnxruntime.

@@ -509,14 +389,13 @@

print("predict_proba", pred_onx[1][:1])

-

Out:

-
predict [0 0 2 1 2]
-predict_proba [{0: 0.5258911848068237, 1: 0.23688316345214844, 2: 0.23722566664218903}]
+
predict [0 2 1 2 1]
+predict_proba [{0: 0.519957959651947, 1: 0.2454928159713745, 2: 0.23454922437667847}]
 
-
-
+
+

Display the ONNX graph#

pydot_graph = GetPydotGraph(
     model_onnx.graph, name=model_onnx.graph.name, rankdir="TB",
     node_producer=GetOpNodeProducer(
@@ -526,14 +405,13 @@ 

os.system('dot -O -Gdpi=300 -Tpng pipeline.dot')
image = plt.imread("pipeline.dot.png")
fig, ax = plt.subplots(figsize=(40, 20))
ax.imshow(image)
ax.axis('off')

plot pipeline lightgbm

Out:

-(-0.5, 2108.5, 2558.5, -0.5)
+(-0.5, 2549.5, 2558.5, -0.5)
 

Versions used for this example

print("lightgbm: ", lightgbm.__version__)

Out:

-numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
-onnxmltools:  1.11.0
-lightgbm:  3.3.2
+numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
+onnxmltools:  1.11.2
+lightgbm:  3.3.4

Total running time of the script: ( 0 minutes 4.818 seconds)

diff --git a/auto_examples/plot_pipeline_xgboost.html b/auto_examples/plot_pipeline_xgboost.html
index a4e4257e6..05f1875c6 100644
--- a/auto_examples/plot_pipeline_xgboost.html
+++ b/auto_examples/plot_pipeline_xgboost.html
@@ -1,419 +1,299 @@
-Convert a pipeline with a XGBoost model — sklearn-onnx 1.11 documentation
+Convert a pipeline with a XGBoost model - sklearn-onnx 1.14.0 documentation

Convert a pipeline with a XGBoost model

+
+

Convert a pipeline with a XGBoost model#

sklearn-onnx only converts scikit-learn models into ONNX, but many libraries implement the scikit-learn API so that their models can be included in a scikit-learn pipeline. This example considers a pipeline including an XGBoost model. sklearn-onnx can convert the whole pipeline as long as it knows the converter associated with an XGBClassifier. Let's see how to do it.

Train a XGBoost classifier#

import os
 import numpy
 import matplotlib.pyplot as plt
@@ -465,11 +345,7 @@ 

# we need to register a converter.

Register the converter for XGBClassifier#

The converter is implemented in onnxmltools: onnxmltools…XGBoost.py. It is registered together with a shape calculator so that skl2onnx knows how to handle an XGBClassifier inside a pipeline.

    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})
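As with LightGBM, the registration typically follows the sketch below; the onnxmltools import path and the shape calculator are the ones commonly used for this purpose and may differ across versions:

from xgboost import XGBClassifier
from onnxmltools.convert.xgboost.operator_converters.XGBoost import convert_xgboost
from skl2onnx import update_registered_converter
from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes

update_registered_converter(
    XGBClassifier, 'XGBoostXGBClassifier',
    calculate_linear_classifier_output_shapes, convert_xgboost,
    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})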

Convert again#

model_onnx = convert_sklearn(
     pipe, 'pipeline_xgboost',
     [('input', FloatTensorType([None, 2]))],
@@ -511,17 +387,16 @@ 

f.write(model_onnx.SerializeToString())

Compare the predictions#

Predictions with XGBoost.

print("predict", pipe.predict(X[:5]))
 print("predict_proba", pipe.predict_proba(X[:1]))
 
-

Out:

-
predict [1 1 0 2 1]
-predict_proba [[0.14671634 0.48657113 0.36671248]]
+
predict [0 1 2 2 1]
+predict_proba [[0.69600695 0.1526681  0.15132491]]
 

Predictions with onnxruntime.

@@ -531,14 +406,13 @@

print("predict_proba", pred_onx[1][:1])

-

Out:

-
predict [1 1 0 2 1]
-predict_proba [{0: 0.14671637117862701, 1: 0.48657119274139404, 2: 0.3667125403881073}]
+
predict [0 1 2 2 1]
+predict_proba [{0: 0.6960069537162781, 1: 0.15266810357570648, 2: 0.15132491290569305}]
 
-
-
+
+

Display the ONNX graph#

pydot_graph = GetPydotGraph(
     model_onnx.graph, name=model_onnx.graph.name, rankdir="TB",
     node_producer=GetOpNodeProducer(
@@ -548,14 +422,13 @@ 

os.system('dot -O -Gdpi=300 -Tpng pipeline.dot')
image = plt.imread("pipeline.dot.png")
fig, ax = plt.subplots(figsize=(40, 20))
ax.imshow(image)
ax.axis('off')

plot pipeline xgboost

Out:

-(-0.5, 2112.5, 2558.5, -0.5)
+(-0.5, 2485.5, 2558.5, -0.5)
 

Versions used for this example

print("xgboost: ", xgboost.__version__)

Out:

-numpy: 1.22.1
-scikit-learn: 1.1.dev0
-onnx:  1.11.0
-onnxruntime:  1.11.0+cpu
-skl2onnx:  1.11
-onnxmltools:  1.11.0
-xgboost:  1.5.2
+numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
+onnxmltools:  1.11.2
+xgboost:  1.7.3

Total running time of the script: ( 0 minutes 3.584 seconds)

diff --git a/auto_examples/plot_tfidfvectorizer.html b/auto_examples/plot_tfidfvectorizer.html
index 98ab2d5b7..121cd5f94 100644
--- a/auto_examples/plot_tfidfvectorizer.html
+++ b/auto_examples/plot_tfidfvectorizer.html
@@ -1,404 +1,296 @@
-TfIdfVectorizer with ONNX — sklearn-onnx 1.11 documentation
+TfIdfVectorizer with ONNX - sklearn-onnx 1.14.0 documentation

TfIdfVectorizer with ONNX

+
+

TfIdfVectorizer with ONNX#

This example is inspired by the following scikit-learn example: Column Transformer with Heterogeneous Data Sources, which builds a pipeline to classify text.

Train a pipeline with TfidfVectorizer#

It replicates the same pipeline taken from the scikit-learn documentation but reduces it to the part ONNX actually supports without implementing a custom converter. Let's get the data.
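The data comes from the 20 newsgroups dataset, as in the scikit-learn example. A hedged sketch of fetching a two-class subset (the categories below are the ones used by the scikit-learn example and are only illustrative here):

from sklearn.datasets import fetch_20newsgroups

train = fetch_20newsgroups(
    subset='train', categories=['alt.atheism', 'talk.religion.misc'],
    random_state=1)
print(len(train.data), train.target_names)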

@@ -482,7 +374,7 @@

pipeline = Pipeline([
     ('union', ColumnTransformer(
         [
-            ('subject', TfidfVectorizer(min_df=50), 0),
+            ('subject', TfidfVectorizer(min_df=50, max_features=500), 0),
 
             ('body_bow', Pipeline([
                 ('tfidf', TfidfVectorizer()),
@@ -513,20 +405,19 @@ 

print(classification_report(pipeline.predict(test_data), test.target))

Out:

              precision    recall  f1-score   support
 
-           0       0.69      0.77      0.73       286
-           1       0.74      0.65      0.70       284
+           0       0.69      0.78      0.73       285
+           1       0.75      0.66      0.70       285
 
-    accuracy                           0.71       570
-   macro avg       0.72      0.71      0.71       570
-weighted avg       0.72      0.71      0.71       570
+    accuracy                           0.72       570
+   macro avg       0.72      0.72      0.71       570
+weighted avg       0.72      0.72      0.71       570
 
-

-
+
+

ONNX conversion#

It is difficult to replicate the exact same tokenizer behaviour if the tokenizer comes from spacy, gensim or nltk. The default one used by scikit-learn uses regular expressions.

    options=seps, target_opset=12)

Out:

-
C:\xadupre\microsoft_xadupre\sklearn-onnx\skl2onnx\common\_container.py:695: UserWarning: Unable to find operator 'Tokenizer' in domain 'com.microsoft' in ONNX, op_version is forced to 1.
-  warnings.warn(
-
-
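The options passed above map a model class to converter options. A hedged sketch of what they can look like (the separator list here is illustrative, not the one used by this example):

from sklearn.feature_extraction.text import TfidfVectorizer

seps = {
    TfidfVectorizer: {
        "separators": [" ", ".", ",", ";", ":", "!", "\\(", "\\)", "\\?"]
    }
}
# then: convert_sklearn(pipeline, ..., options=seps, target_opset=12)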

And save.

with open("pipeline_tfidf.onnx", "wb") as f:
     f.write(model_onnx.SerializeToString())
@@ -567,11 +453,10 @@ 

ONNX conversionprint("predict_proba", pred_onx[1])

-

Out:

--- [" Re: Jews can't hide from keith@cco."
  'Deletions...\n\nSo, you consider the german poster\'s remark anti-semitic?  Perhaps you\nimply that anyone in Germany who doesn\'t agree with israely policy in a\nnazi?  Pray tell, how does it even qualify as "casual anti-semitism"? \nIf the term doesn\'t apply, why then bring it up?\n\nYour own bigotry is shining through.  \n-- ']
 predict [1]
-predict_proba [{0: 0.4394298791885376, 1: 0.5605701208114624}]
+predict_proba [{0: 0.4384377896785736, 1: 0.561562180519104}]
 

With scikit-learn:

@@ -579,17 +464,16 @@

print(pipeline.predict_proba(train_data[:1]))

Out:

[0]
-[[0.71055267 0.28944733]]
+[[0.71903792 0.28096208]]
 

There are discrepancies for this model because the tokenization is not exactly the same. This is a work in progress.


Display the ONNX graph#

Finally, let’s see the graph converted with sklearn-onnx.

pydot_graph = GetPydotGraph(
     model_onnx.graph, name=model_onnx.graph.name,
@@ -601,21 +485,17 @@ 

os.system('dot -O -Gdpi=300 -Tpng pipeline_tfidf.dot')
image = plt.imread("pipeline_tfidf.dot.png")
fig, ax = plt.subplots(figsize=(40, 20))
ax.imshow(image)
ax.axis('off')

plot tfidfvectorizer

Out:

-(-0.5, 3816.5, 12237.5, -0.5)
+(-0.5, 4939.5, 11475.5, -0.5)
 
-

Total running time of the script: ( 0 minutes 11.450 seconds)

diff --git a/auto_examples/sg_execution_times.html b/auto_examples/sg_execution_times.html
index 559e816e7..b9481dc91 100644
--- a/auto_examples/sg_execution_times.html
+++ b/auto_examples/sg_execution_times.html
@@ -1,333 +1,429 @@
-Computation times — sklearn-onnx 1.11 documentation
+Computation times - sklearn-onnx 1.14.0 documentation

Computation times#

-01:30.679 total execution time for auto_examples files:
+01:36.683 total execution time for auto_examples files:

Convert a model with a reduced list of operators (plot_black_op.py)

00:20.711

Write your own converter for your own model (plot_custom_model.py)

00:25.419

0.0 MB

Write your own converter for your own model (plot_custom_model.py)

00:13.684

Convert a model with a reduced list of operators (plot_black_op.py)

00:21.877

0.0 MB

TfIdfVectorizer with ONNX (plot_tfidfvectorizer.py)

00:11.450

0.0 MB

Convert a pipeline with ColumnTransformer (plot_complex_pipeline.py)

00:07.062

0.0 MB

Convert a pipeline with a LightGbm model (plot_pipeline_lightgbm.py)

00:04.818

00:10.979

0.0 MB

Different ways to convert a model (plot_convert_syntax.py)

00:04.354

Benchmark a pipeline (plot_benchmark_pipeline.py)

00:06.685

0.0 MB

Benchmark a pipeline (plot_benchmark_pipeline.py)

00:04.030

Convert a pipeline with ColumnTransformer (plot_complex_pipeline.py)

00:05.573

0.0 MB

Compare CDist with scipy (plot_benchmark_cdist.py)

00:03.862

Walk through intermediate outputs (plot_intermediate_outputs.py)

00:04.105

0.0 MB

Walk through intermediate outputs (plot_intermediate_outputs.py)

00:03.682

Discrepencies with StandardScaler (plot_cast_transformer.py)

00:03.795

0.0 MB

Convert a pipeline with a XGBoost model (plot_pipeline_xgboost.py)

00:03.584

Different ways to convert a model (plot_convert_syntax.py)

00:03.632

0.0 MB

Discrepencies with StandardScaler (plot_cast_transformer.py)

00:03.542

When a custom model is neither a classifier nor a regressor (alternative) (plot_custom_parser_alternative.py)

00:02.812

0.0 MB

When a custom model is neither a classifier nor a regressor (alternative) (plot_custom_parser_alternative.py)

00:02.654

When a custom model is neither a classifier nor a regressor (plot_custom_parser.py)

00:02.623

0.0 MB

When a custom model is neither a classifier nor a regressor (plot_custom_parser.py)

00:02.490

Compare CDist with scipy (plot_benchmark_cdist.py)

00:01.989

0.0 MB

Play with ONNX operators (plot_onnx_operators.py)

00:01.104

Convert a pipeline with a XGBoost model (plot_pipeline_xgboost.py)

00:01.691

0.0 MB

Draw a pipeline (plot_pipeline.py)

00:00.801

Convert a pipeline with a LightGbm model (plot_pipeline_lightgbm.py)

00:01.669

0.0 MB

Discrepencies with GaussianProcessorRegressor: use of double (plot_gpr.py)

00:00.753

00:01.592

0.0 MB

Custom Operator for NMF Decomposition (plot_nmf.py)

00:00.428

Play with ONNX operators (plot_onnx_operators.py)

00:01.010

0.0 MB

ONNX Runtime Backend for ONNX (plot_backend.py)

00:00.419

Custom Operator for NMF Decomposition (plot_nmf.py)

00:00.368

0.0 MB

Investigate a pipeline (plot_investigate_pipeline.py)

00:00.323

00:00.274

0.0 MB

Train, convert and predict a model (plot_convert_model.py)

00:00.273

00:00.182

0.0 MB

Probabilities as a vector or as a ZipMap (plot_convert_zipmap.py)

00:00.264

00:00.153

0.0 MB

Draw a pipeline (plot_pipeline.py)

00:00.124

0.0 MB

Logging, verbose (plot_logging.py)

00:00.158

Logging, verbose (plot_logging.py)

00:00.092

0.0 MB

Probabilities or raw scores (plot_convert_decision_function.py)

00:00.087

Probabilities or raw scores (plot_convert_decision_function.py)

00:00.042

0.0 MB

Metadata (plot_metadata.py)

00:00.079

ONNX Runtime Backend for ONNX (plot_backend.py)

00:00.000

0.0 MB

Errors with onnxruntime (plot_errors_onnxruntime.py)

00:00.064

Errors with onnxruntime (plot_errors_onnxruntime.py)

00:00.000

0.0 MB

Metadata (plot_metadata.py)

00:00.000

0.0 MB

+ - -
- - - -
-
- -
- - +
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
- - -
-
- - - - - +
-
- +
+ + + + \ No newline at end of file diff --git a/auto_tutorial/index.html b/auto_tutorial/index.html index 071453554..71df773b7 100644 --- a/auto_tutorial/index.html +++ b/auto_tutorial/index.html @@ -1,464 +1,420 @@ - - - - - - - - - Examples — sklearn-onnx 1.11.2 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -
- - -
- - - - - -
- - -
- - - -
- -
- -
- - -
- - - - -
- -
- -
-

Examples#

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-Converter for WOE -
-

Converter for WOE#

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-

Gallery generated by Sphinx-Gallery

-
- - -
- - - -
- -
-
-
- -
- -
-
- - - - - - -
-
- + + + + + + + + + Examples - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+ +
+ +
+ +
+
+
+

Examples#

+
Fast runtime with onnxruntime
Tricky issue when converting CountVectorizer or TfidfVectorizer
Train and deploy a scikit-learn pipeline
Store arrays in one onnx graph
Convert a pipeline with a LightGBM classifier
What is the opset number?
Black list operators when converting
Dealing with discrepancies (tf-idf)
Intermediate results and investigation
Converter for WOE
Modify the ONNX graph
Benchmark ONNX conversion
Dataframe as an input
Convert a pipeline with a LightGBM regressor
Implement a new converter using other converters
Issues when switching to float
Choose appropriate output of a classifier
One model, many possible conversions with options
Change the number of outputs by adding a parser
Two ways to implement a converter
A new converter with options
Convert a pipeline with a XGBoost model
Convert a pipeline with a CatBoost classifier
Transfer Learning with ONNX
Implement a new converter
Converter for pyod.models.iforest.IForest
Converter for WOEEncoder from categorical_encoder
TfIdf and sparse matrices
Fast design with a python runtime
+ +

Gallery generated by Sphinx-Gallery

+
+ +
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
diff --git a/auto_tutorial/plot_abegin_convert_pipeline.html b/auto_tutorial/plot_abegin_convert_pipeline.html
index 954558209..a53f1487f 100644
--- a/auto_tutorial/plot_abegin_convert_pipeline.html
+++ b/auto_tutorial/plot_abegin_convert_pipeline.html
@@ -1,691 +1,546 @@
-Train and deploy a scikit-learn pipeline — sklearn-onnx 1.11.2 documentation
+Train and deploy a scikit-learn pipeline - sklearn-onnx 1.14.0 documentation
-
- - - - - - - - - -
- - - - -
- -
- - -
- - - - -
- -
- - -
-

Train and deploy a scikit-learn pipeline#

-

This program starts from an example in the scikit-learn documentation: Plot individual and voting regression predictions, converts it into ONNX and finally computes the predictions with a different runtime.

- -
-

Training a pipeline#

-
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
-import numpy
-from onnxruntime import InferenceSession
-from sklearn.datasets import load_diabetes
-from sklearn.ensemble import (
-    GradientBoostingRegressor, RandomForestRegressor,
-    VotingRegressor)
-from sklearn.linear_model import LinearRegression
-from sklearn.model_selection import train_test_split
-from sklearn.pipeline import Pipeline
-from skl2onnx import to_onnx
-from mlprodict.onnxrt import OnnxInference
-
-
-X, y = load_diabetes(return_X_y=True)
-X_train, X_test, y_train, y_test = train_test_split(X, y)
-
-# Train classifiers
-reg1 = GradientBoostingRegressor(random_state=1, n_estimators=5)
-reg2 = RandomForestRegressor(random_state=1, n_estimators=5)
-reg3 = LinearRegression()
-
-ereg = Pipeline(steps=[
-    ('voting', VotingRegressor([('gb', reg1), ('rf', reg2), ('lr', reg3)])),
-])
-ereg.fit(X_train, y_train)
-
-
-
-
Pipeline(steps=[('voting',
-                 VotingRegressor(estimators=[('gb',
-                                              GradientBoostingRegressor(n_estimators=5,
-                                                                        random_state=1)),
-                                             ('rf',
-                                              RandomForestRegressor(n_estimators=5,
-                                                                    random_state=1)),
-                                             ('lr', LinearRegression())]))])
-
-
-
-
-

Converts the model#

-

The second argument gives a sample of the data used to train the model. It is used to infer the input type of the ONNX graph. The sample is converted into single floats because ONNX runtimes may not fully support doubles.

-
onx = to_onnx(ereg, X_train[:1].astype(numpy.float32),
-              target_opset=12)
-
-
-
-
-

Prediction with ONNX#

-

The first example uses onnxruntime.

-
sess = InferenceSession(onx.SerializeToString())
-pred_ort = sess.run(None, {'X': X_test.astype(numpy.float32)})[0]
-
-pred_skl = ereg.predict(X_test.astype(numpy.float32))
-
-print("Onnx Runtime prediction:\n", pred_ort[:5])
-print("Sklearn rediction:\n", pred_skl[:5])
-
-
-

Out:

-
Onnx Runtime prediction:
- [[169.97092]
- [165.00122]
- [161.92123]
- [129.07263]
- [177.11513]]
-Sklearn prediction:
- [169.97090438 165.00121185 161.92123427 129.07263128 177.11512633]
-
-
-
-
-

Comparison#

-

Before deploying, we need to check that both scikit-learn and ONNX return the same predictions.

-
def diff(p1, p2):
-    p1 = p1.ravel()
-    p2 = p2.ravel()
-    d = numpy.abs(p2 - p1)
-    return d.max(), (d / numpy.abs(p1)).max()
-
-
-print(diff(pred_skl, pred_ort))
-
-
-

Out:

-
(2.888406933720944e-05, 1.3576994417322481e-07)
-
-
-

It looks good. The biggest errors (absolute and relative) are within the margin of error introduced by using floats instead of doubles. We can save the model in ONNX format and compute the same predictions on many platforms using onnxruntime.

-
-
-

Python runtime#

-

A python runtime can be used as well to compute the prediction. It is not meant to be used in production (it still relies on python), but it is useful to investigate why the conversion went wrong. It uses the module mlprodict.

-
oinf = OnnxInference(onx, runtime="python_compiled")
-print(oinf)
-
-
-

Out:

-
OnnxInference(...)
-    def compiled_run(dict_inputs, yield_ops=None, context=None):
-        if yield_ops is not None:
-            raise NotImplementedError('yields_ops should be None.')
-        # init: w0 (w0)
-        # inputs
-        X = dict_inputs['X']
-        (var_2, ) = n0_linearregressor(X)
-        (var_0, ) = n1_treeensembleregressor_1(X)
-        (var_1, ) = n2_treeensembleregressor_1(X)
-        (wvar_2, ) = n3_mul(var_2, w0)
-        (wvar_1, ) = n4_mul(var_1, w0)
-        (wvar_0, ) = n5_mul(var_0, w0)
-        (fvar_2, ) = n6_flatten(wvar_2)
-        (fvar_1, ) = n7_flatten(wvar_1)
-        (fvar_0, ) = n8_flatten(wvar_0)
-        (variable, ) = n9_sum(fvar_0, fvar_1, fvar_2)
-        return {
-            'variable': variable,
-        }
-
-
-

It works almost the same way.

-
pred_pyrt = oinf.run({'X': X_test.astype(numpy.float32)})['variable']
-print(diff(pred_skl, pred_pyrt))
-
-
-

Out:

-
(2.888406933720944e-05, 1.3576994417322481e-07)
-
-
-

Final graph. You may need to install graphviz from https://graphviz.org/download/.

-
ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot abegin convert pipeline

Total running time of the script: ( 0 minutes 1.391 seconds)

- -

Gallery generated by Sphinx-Gallery

-
-
- - -
- - - - - -
- -
-
- - - - - - -
-
- + + + + + + + + + Train and deploy a scikit-learn pipeline - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+ +
+ +
+ +
+
+ +
+

Train and deploy a scikit-learn pipeline#

+

This program starts from an example in the scikit-learn documentation: Plot individual and voting regression predictions, converts it into ONNX and finally computes the predictions with a different runtime.

+
+

Training a pipeline#

+
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+import numpy
+from onnxruntime import InferenceSession
+from sklearn.datasets import load_diabetes
+from sklearn.ensemble import (
+    GradientBoostingRegressor, RandomForestRegressor,
+    VotingRegressor)
+from sklearn.linear_model import LinearRegression
+from sklearn.model_selection import train_test_split
+from sklearn.pipeline import Pipeline
+from skl2onnx import to_onnx
+from mlprodict.onnxrt import OnnxInference
+
+
+X, y = load_diabetes(return_X_y=True)
+X_train, X_test, y_train, y_test = train_test_split(X, y)
+
+# Train classifiers
+reg1 = GradientBoostingRegressor(random_state=1, n_estimators=5)
+reg2 = RandomForestRegressor(random_state=1, n_estimators=5)
+reg3 = LinearRegression()
+
+ereg = Pipeline(steps=[
+    ('voting', VotingRegressor([('gb', reg1), ('rf', reg2), ('lr', reg3)])),
+])
+ereg.fit(X_train, y_train)
+
+
+
+
Pipeline(steps=[('voting',
+                 VotingRegressor(estimators=[('gb',
+                                              GradientBoostingRegressor(n_estimators=5,
+                                                                        random_state=1)),
+                                             ('rf',
+                                              RandomForestRegressor(n_estimators=5,
+                                                                    random_state=1)),
+                                             ('lr', LinearRegression())]))])
+
+
+
+
+

Converts the model#

+

The second argument gives a sample of the data used to train the model. It is used to infer the input type of the ONNX graph. The sample is converted into single floats because ONNX runtimes may not fully support doubles.

+
onx = to_onnx(ereg, X_train[:1].astype(numpy.float32),
+              target_opset=12)
+
+
+
+
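If single floats are not precise enough, the input type can also be stated explicitly. A hedged sketch reusing the pipeline fitted above (whether doubles are actually supported depends on the converters involved, so this may fail for some models):

from skl2onnx.common.data_types import DoubleTensorType

onx64 = to_onnx(
    ereg, initial_types=[('X', DoubleTensorType([None, X_train.shape[1]]))],
    target_opset=12)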
+

Prediction with ONNX#

+

The first example uses onnxruntime.

+
sess = InferenceSession(onx.SerializeToString())
+pred_ort = sess.run(None, {'X': X_test.astype(numpy.float32)})[0]
+
+pred_skl = ereg.predict(X_test.astype(numpy.float32))
+
+print("Onnx Runtime prediction:\n", pred_ort[:5])
+print("Sklearn rediction:\n", pred_skl[:5])
+
+
+
Onnx Runtime prediction:
+ [[214.14449 ]
+ [107.54984 ]
+ [ 83.3338  ]
+ [168.55504 ]
+ [ 85.816086]]
+Sklearn prediction:
+ [214.14448115 107.54983764  83.33379987 168.55502653  85.81608297]
+
+
+
+
+

Comparison#

+

Before deploying, we need to check that both scikit-learn and ONNX return the same predictions.

+
def diff(p1, p2):
+    p1 = p1.ravel()
+    p2 = p2.ravel()
+    d = numpy.abs(p2 - p1)
+    return d.max(), (d / numpy.abs(p1)).max()
+
+
+print(diff(pred_skl, pred_ort))
+
+
+
(2.225058631211141e-05, 1.2777930441858178e-07)
+
+
+

It looks good. The biggest errors (absolute and relative) are within the margin of error introduced by using floats instead of doubles. We can save the model in ONNX format and compute the same predictions on many platforms using onnxruntime.

+
+
+

Python runtime#

+

A python runtime can be used as well to compute the prediction. It is not meant to be used in production (it still relies on python), but it is useful to investigate why the conversion went wrong. It uses the module mlprodict.

+
oinf = OnnxInference(onx, runtime="python_compiled")
+print(oinf)
+
+
+
OnnxInference(...)
+    def compiled_run(dict_inputs, yield_ops=None, context=None, attributes=None):
+        if yield_ops is not None:
+            raise NotImplementedError('yields_ops should be None.')
+        # init: w0 (w0)
+        # inputs
+        X = dict_inputs['X']
+        (var_0, ) = n0_treeensembleregressor_1(X)
+        (var_1, ) = n1_treeensembleregressor_1(X)
+        (var_2, ) = n2_linearregressor(X)
+        (wvar_1, ) = n3_mul(var_1, w0)
+        (wvar_0, ) = n4_mul(var_0, w0)
+        (wvar_2, ) = n5_mul(var_2, w0)
+        (fvar_0, ) = n6_flatten(wvar_0)
+        (fvar_1, ) = n7_flatten(wvar_1)
+        (fvar_2, ) = n8_flatten(wvar_2)
+        (variable, ) = n9_sum(fvar_0, fvar_1, fvar_2)
+        return {
+            'variable': variable,
+        }
+
+
+

It works almost the same way.

+
pred_pyrt = oinf.run({'X': X_test.astype(numpy.float32)})['variable']
+print(diff(pred_skl, pred_pyrt))
+
+
+
(2.225058631211141e-05, 1.2777930441858178e-07)
+
+
+

Final graph. You may need to install graphviz from https://graphviz.org/download/.

+
ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot abegin convert pipeline

Total running time of the script: ( 0 minutes 1.079 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+ +
+ +
+
diff --git a/auto_tutorial/plot_bbegin_measure_time.html b/auto_tutorial/plot_bbegin_measure_time.html
index 6a414d1b4..d470cc516 100644
--- a/auto_tutorial/plot_bbegin_measure_time.html
+++ b/auto_tutorial/plot_bbegin_measure_time.html
@@ -1,828 +1,694 @@
-Benchmark ONNX conversion — sklearn-onnx 1.11.2 documentation
+Benchmark ONNX conversion - sklearn-onnx 1.14.0 documentation
-
- - - - - - - - - -
- - -
- -
- On this page -
- - -
- -
- -
- - -
- - - - -
- -
- - -
-

Benchmark ONNX conversion#

-

Example Train and deploy a scikit-learn pipeline converts a simple model. This example uses a similar pipeline, but on random data, and compares the processing time each option needs to compute predictions.

- -
-

Training a pipeline#

-
import numpy
-from pandas import DataFrame
-from tqdm import tqdm
-from sklearn import config_context
-from sklearn.datasets import make_regression
-from sklearn.ensemble import (
-    GradientBoostingRegressor, RandomForestRegressor,
-    VotingRegressor)
-from sklearn.linear_model import LinearRegression
-from sklearn.model_selection import train_test_split
-from mlprodict.onnxrt import OnnxInference
-from onnxruntime import InferenceSession
-from skl2onnx import to_onnx
-from skl2onnx.tutorial import measure_time
-
-
-N = 11000
-X, y = make_regression(N, n_features=10)
-X_train, X_test, y_train, y_test = train_test_split(
-    X, y, train_size=0.01)
-print("Train shape", X_train.shape)
-print("Test shape", X_test.shape)
-
-reg1 = GradientBoostingRegressor(random_state=1)
-reg2 = RandomForestRegressor(random_state=1)
-reg3 = LinearRegression()
-ereg = VotingRegressor([('gb', reg1), ('rf', reg2), ('lr', reg3)])
-ereg.fit(X_train, y_train)
-
-
-

Out:

-
Train shape (110, 10)
-Test shape (10890, 10)
-
-
-
-
VotingRegressor(estimators=[('gb', GradientBoostingRegressor(random_state=1)),
-                            ('rf', RandomForestRegressor(random_state=1)),
-                            ('lr', LinearRegression())])
-
-
-
-
-

Measure the processing time#

-

We use function skl2onnx.tutorial.measure_time(). -The page about assume_finite -may be useful if you need to optimize the prediction. -We measure the processing time per observation whether -or not an observation belongs to a batch or is a single one.

-
sizes = [(1, 50), (10, 50), (1000, 10), (10000, 5)]
-
-with config_context(assume_finite=True):
-    obs = []
-    for batch_size, repeat in tqdm(sizes):
-        context = {"ereg": ereg, 'X': X_test[:batch_size]}
-        mt = measure_time(
-            "ereg.predict(X)", context, div_by_number=True,
-            number=10, repeat=repeat)
-        mt['size'] = context['X'].shape[0]
-        mt['mean_obs'] = mt['average'] / mt['size']
-        obs.append(mt)
-
-df_skl = DataFrame(obs)
-df_skl
-
-
-

Out:

-
  0%|                                                            | 0/4 [00:00<?, ?it/s]
- 25%|#############                                       | 1/4 [00:02<00:06,  2.24s/it]
- 50%|##########################                          | 2/4 [00:04<00:04,  2.24s/it]
- 75%|#######################################             | 3/4 [00:06<00:02,  2.03s/it]
-100%|####################################################| 4/4 [00:11<00:00,  3.16s/it]
-100%|####################################################| 4/4 [00:11<00:00,  2.79s/it]
-
-
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
averagedeviationmin_execmax_execrepeatnumbersizemean_obs
00.0044730.0005340.0042910.008135501010.004473
10.0044600.0003430.0043320.0068355010100.000446
20.0177200.0013140.0169420.021182101010000.000018
30.0977640.0038300.0908960.102486510100000.000010
-
-
-
-

Graph.

-
df_skl.set_index('size')[['mean_obs']].plot(
-    title="scikit-learn", logx=True, logy=True)
-
-
-scikit-learn
-
-

ONNX runtime#

-

The same is done with the two ONNX runtimes available.

-
onx = to_onnx(ereg, X_train[:1].astype(numpy.float32),
-              target_opset=14)
-sess = InferenceSession(onx.SerializeToString())
-oinf = OnnxInference(onx, runtime="python_compiled")
-
-obs = []
-for batch_size, repeat in tqdm(sizes):
-
-    # scikit-learn
-    context = {"ereg": ereg, 'X': X_test[:batch_size].astype(numpy.float32)}
-    mt = measure_time(
-        "ereg.predict(X)", context, div_by_number=True,
-        number=10, repeat=repeat)
-    mt['size'] = context['X'].shape[0]
-    mt['skl'] = mt['average'] / mt['size']
-
-    # onnxruntime
-    context = {"sess": sess, 'X': X_test[:batch_size].astype(numpy.float32)}
-    mt2 = measure_time(
-        "sess.run(None, {'X': X})[0]", context, div_by_number=True,
-        number=10, repeat=repeat)
-    mt['ort'] = mt2['average'] / mt['size']
-
-    # mlprodict
-    context = {"oinf": oinf, 'X': X_test[:batch_size].astype(numpy.float32)}
-    mt2 = measure_time(
-        "oinf.run({'X': X})['variable']", context, div_by_number=True,
-        number=10, repeat=repeat)
-    mt['pyrt'] = mt2['average'] / mt['size']
-
-    # end
-    obs.append(mt)
-
-
-df = DataFrame(obs)
-df
-
-
-

Out:

-
  0%|                                                            | 0/4 [00:00<?, ?it/s]
- 25%|#############                                       | 1/4 [00:02<00:06,  2.32s/it]
- 50%|##########################                          | 2/4 [00:05<00:05,  2.56s/it]
- 75%|#######################################             | 3/4 [00:07<00:02,  2.35s/it]
-100%|####################################################| 4/4 [00:13<00:00,  3.93s/it]
-100%|####################################################| 4/4 [00:13<00:00,  3.38s/it]
-
-
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
averagedeviationmin_execmax_execrepeatnumbersizesklortpyrt
00.0044810.0001660.0044010.005481501010.0044810.0000790.000067
10.0052040.0003480.0050310.0072925010100.0005200.0000120.000014
20.0173020.0005730.0169540.018913101010000.0000170.0000010.000002
30.0958360.0093300.0841420.111543510100000.0000100.0000010.000002
-
-
-
-

Graph.

-
df.set_index('size')[['skl', 'ort', 'pyrt']].plot(
-    title="Average prediction time per runtime",
-    logx=True, logy=True)
-
-
-Average prediction time per runtime

ONNX runtimes are much faster than scikit-learn at predicting one observation. scikit-learn is optimized for training and for batch prediction. That explains why scikit-learn and ONNX runtimes seem to converge for big batches. They use similar implementations, parallelization and languages (C++, openmp).

-

Total running time of the script: ( 0 minutes 31.317 seconds)

- -

Gallery generated by Sphinx-Gallery

-
-
- - -
- - - - - -
- -
-
- - - - - - -
-
- + + + + + + + + + Benchmark ONNX conversion - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+ +
+ +
+ +
+
+ +
+

Benchmark ONNX conversion#

+

Example Train and deploy a scikit-learn pipeline converts a simple model. This example uses a similar pipeline, but on random data, and compares the processing time each option needs to compute predictions.

+
+

Training a pipeline#

+
import numpy
+from pandas import DataFrame
+from tqdm import tqdm
+from sklearn import config_context
+from sklearn.datasets import make_regression
+from sklearn.ensemble import (
+    GradientBoostingRegressor, RandomForestRegressor,
+    VotingRegressor)
+from sklearn.linear_model import LinearRegression
+from sklearn.model_selection import train_test_split
+from mlprodict.onnxrt import OnnxInference
+from onnxruntime import InferenceSession
+from skl2onnx import to_onnx
+from skl2onnx.tutorial import measure_time
+
+
+N = 11000
+X, y = make_regression(N, n_features=10)
+X_train, X_test, y_train, y_test = train_test_split(
+    X, y, train_size=0.01)
+print("Train shape", X_train.shape)
+print("Test shape", X_test.shape)
+
+reg1 = GradientBoostingRegressor(random_state=1)
+reg2 = RandomForestRegressor(random_state=1)
+reg3 = LinearRegression()
+ereg = VotingRegressor([('gb', reg1), ('rf', reg2), ('lr', reg3)])
+ereg.fit(X_train, y_train)
+
+
+
Train shape (110, 10)
+Test shape (10890, 10)
+
+
+
+
VotingRegressor(estimators=[('gb', GradientBoostingRegressor(random_state=1)),
+                            ('rf', RandomForestRegressor(random_state=1)),
+                            ('lr', LinearRegression())])
+
+
+
+
+

Measure the processing time#

+

We use function skl2onnx.tutorial.measure_time(). +The page about assume_finite +may be useful if you need to optimize the prediction. +We measure the processing time per observation whether +or not an observation belongs to a batch or is a single one.

+
sizes = [(1, 50), (10, 50), (1000, 10), (10000, 5)]
+
+with config_context(assume_finite=True):
+    obs = []
+    for batch_size, repeat in tqdm(sizes):
+        context = {"ereg": ereg, 'X': X_test[:batch_size]}
+        mt = measure_time(
+            "ereg.predict(X)", context, div_by_number=True,
+            number=10, repeat=repeat)
+        mt['size'] = context['X'].shape[0]
+        mt['mean_obs'] = mt['average'] / mt['size']
+        obs.append(mt)
+
+df_skl = DataFrame(obs)
+df_skl
+
+
+
  0%|          | 0/4 [00:00<?, ?it/s]
+ 25%|##5       | 1/4 [00:03<00:10,  3.56s/it]
+ 50%|#####     | 2/4 [00:06<00:06,  3.44s/it]
+ 75%|#######5  | 3/4 [00:10<00:03,  3.58s/it]
+100%|##########| 4/4 [00:18<00:00,  5.37s/it]
+100%|##########| 4/4 [00:18<00:00,  4.70s/it]
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
averagedeviationmin_execmax_execrepeatnumbersizemean_obs
00.0071180.0013540.0057860.012553501010.007118
10.0066860.0010050.0055420.0100005010100.000669
20.0375630.0032210.0332560.045436101010000.000038
30.1622500.0083430.1531900.177519510100000.000016
+
+
+
+

Graph.

+
df_skl.set_index('size')[['mean_obs']].plot(
+    title="scikit-learn", logx=True, logy=True)
+
+
+scikit-learn
+
+

ONNX runtime#

+

The same is done with the two ONNX runtimes available.

+
onx = to_onnx(ereg, X_train[:1].astype(numpy.float32),
+              target_opset=14)
+sess = InferenceSession(onx.SerializeToString())
+oinf = OnnxInference(onx, runtime="python_compiled")
+
+obs = []
+for batch_size, repeat in tqdm(sizes):
+
+    # scikit-learn
+    context = {"ereg": ereg, 'X': X_test[:batch_size].astype(numpy.float32)}
+    mt = measure_time(
+        "ereg.predict(X)", context, div_by_number=True,
+        number=10, repeat=repeat)
+    mt['size'] = context['X'].shape[0]
+    mt['skl'] = mt['average'] / mt['size']
+
+    # onnxruntime
+    context = {"sess": sess, 'X': X_test[:batch_size].astype(numpy.float32)}
+    mt2 = measure_time(
+        "sess.run(None, {'X': X})[0]", context, div_by_number=True,
+        number=10, repeat=repeat)
+    mt['ort'] = mt2['average'] / mt['size']
+
+    # mlprodict
+    context = {"oinf": oinf, 'X': X_test[:batch_size].astype(numpy.float32)}
+    mt2 = measure_time(
+        "oinf.run({'X': X})['variable']", context, div_by_number=True,
+        number=10, repeat=repeat)
+    mt['pyrt'] = mt2['average'] / mt['size']
+
+    # end
+    obs.append(mt)
+
+
+df = DataFrame(obs)
+df
+
+
+
  0%|          | 0/4 [00:00<?, ?it/s]
+ 25%|##5       | 1/4 [00:03<00:10,  3.65s/it]
+ 50%|#####     | 2/4 [00:07<00:07,  3.52s/it]
+ 75%|#######5  | 3/4 [00:18<00:07,  7.29s/it]
+100%|##########| 4/4 [00:34<00:00, 10.72s/it]
+100%|##########| 4/4 [00:34<00:00,  8.71s/it]
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
averagedeviationmin_execmax_execrepeatnumbersizesklortpyrt
00.0071770.0007100.0058200.010053501010.0071770.0000260.000077
10.0066450.0008430.0056700.0087725010100.0006640.0000100.000010
20.0341820.0015190.0329910.038628101010000.0000340.0000040.000080
30.1555200.0030900.1514160.160624510100000.0000160.0000030.000014
+
+
+
+

Graph.

+
df.set_index('size')[['skl', 'ort', 'pyrt']].plot(
+    title="Average prediction time per runtime",
+    logx=True, logy=True)
+
+
+Average prediction time per runtime

ONNX runtimes are much faster than scikit-learn at predicting one observation. scikit-learn is optimized for training and for batch prediction. That explains why scikit-learn and ONNX runtimes seem to converge for big batches. They use similar implementations, parallelization and languages (C++, openmp).

+

Total running time of the script: ( 0 minutes 55.146 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+ +
+ +
+
diff --git a/auto_tutorial/plot_catwoe_transformer.html b/auto_tutorial/plot_catwoe_transformer.html
index dc4bdc04f..66c89f27b 100644
--- a/auto_tutorial/plot_catwoe_transformer.html
+++ b/auto_tutorial/plot_catwoe_transformer.html
@@ -1,564 +1,624 @@
-Converter for WOEEncoder from categorical_encoder — sklearn-onnx 1.11.2 documentation
+Converter for WOEEncoder from categorical_encoder - sklearn-onnx 1.14.0 documentation
-
- - - -
- - -
- - - - - -
- - - - -
- -
- - -
- - - - -
- -
- - -
-

Converter for WOEEncoder from categorical_encoder#

-

WOEEncoder is a transformer implemented in categorical_encoder and, as such, its converter is not included in sklearn-onnx, which only implements converters for scikit-learn models. Anyhow, this example demonstrates how to implement a custom converter for WOEEncoder. This code is not fully tested for all the possible cases the original encoder can handle.

- -
-

A simple example#

-

Let's take the Iris dataset. Every feature is converted into an integer.

-
import numpy as np
-from onnxruntime import InferenceSession
-from sklearn.datasets import load_iris
-from sklearn.preprocessing import OrdinalEncoder as SklOrdinalEncoder
-from category_encoders import WOEEncoder, OrdinalEncoder
-from skl2onnx import update_registered_converter, to_onnx, get_model_alias
-from skl2onnx.common.data_types import FloatTensorType
-from skl2onnx.common.utils import check_input_and_output_numbers
-from skl2onnx.algebra.onnx_ops import OnnxCast
-from skl2onnx.algebra.onnx_operator import OnnxSubEstimator
-from skl2onnx.sklapi import WOETransformer
-import skl2onnx.sklapi.register  # noqa
-
-data = load_iris()
-X, y = data.data, data.target
-X = X.astype(np.int64)[:, :2]
-y = (y == 2).astype(np.int64)
-
-woe = WOEEncoder(cols=[0]).fit(X, y)
-print(woe.transform(X[:5]))
-
-
-

Out:

-
          0  1
-0 -1.405712  3
-1 -1.724166  3
-2 -1.724166  3
-3 -1.724166  3
-4 -1.405712  3
-
-
-

Let's look into the trained parameters of the model. It appears that WOEEncoder uses an OrdinalEncoder, but not the one from scikit-learn. We need to add a converter for this model too.

-
print("encoder", type(woe.ordinal_encoder), woe.ordinal_encoder)
-print("mapping", woe.mapping)
-print("encoder.mapping", woe.ordinal_encoder.mapping)
-print("encoder.cols", woe.ordinal_encoder.cols)
-
-
-

Out:

-
encoder <class 'category_encoders.ordinal.OrdinalEncoder'> OrdinalEncoder(cols=[0],
-               mapping=[{'col': 0, 'data_type': dtype('int64'),
-                         'mapping': 5.0    1
-4.0    2
-7.0    3
-6.0    4
-NaN   -2
-dtype: int64}])
-mapping {0: 0
- 1   -1.405712
- 2   -1.724166
- 3    2.545531
- 4    0.961411
--1    0.000000
--2    0.000000
-dtype: float64}
-encoder.mapping [{'col': 0, 'mapping': 5.0    1
-4.0    2
-7.0    3
-6.0    4
-NaN   -2
-dtype: int64, 'data_type': dtype('int64')}]
-encoder.cols [0]
-
-
-
-
-

Custom converter for OrdinalEncoder#

-

We start from example Implement a new converter -and then write the conversion.

-
def ordenc_to_sklearn(op_mapping):
-    "Converts OrdinalEncoder mapping to scikit-learn OrdinalEncoder."
-    cats = []
-    for column_map in op_mapping:
-        col = column_map['col']
-        while len(cats) <= col:
-            cats.append(None)
-        mapping = column_map['mapping']
-        res = []
-        for i in range(mapping.shape[0]):
-            if np.isnan(mapping.index[i]):
-                continue
-            ind = mapping.iloc[i]
-            while len(res) <= ind:
-                res.append(0)
-            res[ind] = mapping.index[i]
-        cats[col] = np.array(res, dtype=np.int64)
-
-    skl_ord = SklOrdinalEncoder(categories=cats, dtype=np.int64)
-    skl_ord.categories_ = cats
-    return skl_ord
-
-
-def ordinal_encoder_shape_calculator(operator):
-    check_input_and_output_numbers(
-        operator, input_count_range=1, output_count_range=1)
-    input_type = operator.inputs[0].type.__class__
-    input_dim = operator.inputs[0].get_first_dimension()
-    shape = operator.inputs[0].type.shape
-    second_dim = None if len(shape) != 2 else shape[1]
-    output_type = input_type([input_dim, second_dim])
-    operator.outputs[0].type = output_type
-
-
-def ordinal_encoder_converter(scope, operator, container):
-    op = operator.raw_operator
-    opv = container.target_opset
-    X = operator.inputs[0]
-
-    skl_ord = ordenc_to_sklearn(op.mapping)
-    cat = OnnxSubEstimator(skl_ord, X, op_version=opv,
-                           output_names=operator.outputs[:1])
-    cat.add_to(scope, container)
-
-
-update_registered_converter(
-    OrdinalEncoder, "CategoricalEncoderOrdinalEncoder",
-    ordinal_encoder_shape_calculator,
-    ordinal_encoder_converter)
-
-
-

Let's compute the output on a short example.

-
enc = OrdinalEncoder(cols=[0, 1])
-enc.fit(X)
-print(enc.transform(X[:5]))
-
-
-

Out:

-
   0  1
-0  1  1
-1  2  1
-2  2  1
-3  2  1
-4  1  1
-
-
-

Let’s check the ONNX conversion produces the same results.

-
ord_onx = to_onnx(enc, X[:1], target_opset=14)
-sess = InferenceSession(ord_onx.SerializeToString())
-print(sess.run(None, {'X': X[:5]})[0])
-
-
-

Out:

-
[[1 1]
- [2 1]
- [2 1]
- [2 1]
- [1 1]]
-
-
-

That works.

-
-
-

Custom converter for WOEEncoder#

-

We start from example Implement a new converter -and then write the conversion.

-
def woeenc_to_sklearn(op_mapping):
-    "Converts WOEEncoder mapping to scikit-learn OrdinalEncoder."
-    cats = []
-    ws = []
-    for column_map in op_mapping.items():
-        col = column_map[0]
-        while len(cats) <= col:
-            cats.append('passthrough')
-            ws.append(None)
-        mapping = column_map[1]
-        intervals = []
-        weights = []
-        for i in range(mapping.shape[0]):
-            ind = mapping.index[i]
-            if ind < 0:
-                continue
-            intervals.append((float(ind - 1), float(ind), False, True))
-            weights.append(mapping.iloc[i])
-        cats[col] = intervals
-        ws[col] = weights
-
-    skl = WOETransformer(intervals=cats, weights=ws, onehot=False)
-    skl.fit(None)
-    return skl
-
-
-def woe_encoder_parser(
-        scope, model, inputs, custom_parsers=None):
-    if len(inputs) != 1:
-        raise RuntimeError(
-            "Unexpected number of inputs: %d != 1." % len(inputs))
-    if inputs[0].type is None:
-        raise RuntimeError(
-            "Unexpected type: %r." % (inputs[0], ))
-    alias = get_model_alias(type(model))
-    this_operator = scope.declare_local_operator(alias, model)
-    this_operator.inputs.append(inputs[0])
-    this_operator.outputs.append(
-        scope.declare_local_variable('catwoe', FloatTensorType()))
-    return this_operator.outputs
-
-
-def woe_encoder_shape_calculator(operator):
-    check_input_and_output_numbers(
-        operator, input_count_range=1, output_count_range=1)
-    input_dim = operator.inputs[0].get_first_dimension()
-    shape = operator.inputs[0].type.shape
-    second_dim = None if len(shape) != 2 else shape[1]
-    output_type = FloatTensorType([input_dim, second_dim])
-    operator.outputs[0].type = output_type
-
-
-def woe_encoder_converter(scope, operator, container):
-    op = operator.raw_operator
-    opv = container.target_opset
-    X = operator.inputs[0]
-
-    sub = OnnxSubEstimator(op.ordinal_encoder, X,
-                           op_version=opv)
-    cast = OnnxCast(sub, op_version=opv, to=np.float32)
-    skl_ord = woeenc_to_sklearn(op.mapping)
-    cat = OnnxSubEstimator(skl_ord, cast, op_version=opv,
-                           output_names=operator.outputs[:1],
-                           input_types=[FloatTensorType()])
-    cat.add_to(scope, container)
-
-
-update_registered_converter(
-    WOEEncoder, "CategoricalEncoderWOEEncoder",
-    woe_encoder_shape_calculator,
-    woe_encoder_converter,
-    parser=woe_encoder_parser)
-
-
-

Let’s compute the output on a short example.

-
woe = WOEEncoder(cols=[0, 1]).fit(X, y)
-print(woe.transform(X[:5]))
-
-
-

Out:

-
          0         1
-0 -1.405712 -0.035947
-1 -1.724166 -0.035947
-2 -1.724166 -0.035947
-3 -1.724166 -0.035947
-4 -1.405712 -0.035947
-
-
-

Let’s check the ONNX conversion produces the same results.

-
woe_onx = to_onnx(woe, X[:1], target_opset=14)
-sess = InferenceSession(woe_onx.SerializeToString())
-print(sess.run(None, {'X': X[:5]})[0])
-
-
-

Out:

-
[[-1.4057125  -0.03594739]
- [-1.7241662  -0.03594739]
- [-1.7241662  -0.03594739]
- [-1.7241662  -0.03594739]
- [-1.4057125  -0.03594739]]
-
-
-

Total running time of the script: ( 0 minutes 0.459 seconds)

- -

Gallery generated by Sphinx-Gallery

- + Converter for WOEEncoder from categorical_encoder - sklearn-onnx 1.14.0 documentation

Converter for WOEEncoder from categorical_encoder#

+

WOEEncoder is a transformer implemented in categorical_encoder and, as such, no converter for it is included in sklearn-onnx, which only implements converters for scikit-learn models. This example demonstrates how to implement a custom converter for WOEEncoder. The code is not fully tested against all the cases the original encoder can handle.

+
+

A simple example#

+

Let’s take the Iris dataset. Every feature is converted into an integer.

+
import numpy as np
+from onnxruntime import InferenceSession
+from sklearn.datasets import load_iris
+from sklearn.preprocessing import OrdinalEncoder as SklOrdinalEncoder
+from category_encoders import WOEEncoder, OrdinalEncoder
+from skl2onnx import update_registered_converter, to_onnx, get_model_alias
+from skl2onnx.common.data_types import FloatTensorType
+from skl2onnx.common.utils import check_input_and_output_numbers
+from skl2onnx.algebra.onnx_ops import OnnxCast
+from skl2onnx.algebra.onnx_operator import OnnxSubEstimator
+from skl2onnx.sklapi import WOETransformer
+import skl2onnx.sklapi.register  # noqa
+
+data = load_iris()
+X, y = data.data, data.target
+X = X.astype(np.int64)[:, :2]
+y = (y == 2).astype(np.int64)
+
+woe = WOEEncoder(cols=[0]).fit(X, y)
+print(woe.transform(X[:5]))
+
+
+
          0  1
+0 -1.405712  3
+1 -1.724166  3
+2 -1.724166  3
+3 -1.724166  3
+4 -1.405712  3
+
+
+

Let’s look into the trained parameters of the model. It appears that WOEEncoder uses an OrdinalEncoder, but not the one from scikit-learn. We need to add a converter for this model too.

+
print("encoder", type(woe.ordinal_encoder), woe.ordinal_encoder)
+print("mapping", woe.mapping)
+print("encoder.mapping", woe.ordinal_encoder.mapping)
+print("encoder.cols", woe.ordinal_encoder.cols)
+
+
+
encoder <class 'category_encoders.ordinal.OrdinalEncoder'> OrdinalEncoder(cols=[0],
+               mapping=[{'col': 0, 'data_type': dtype('int64'),
+                         'mapping': 5.0    1
+4.0    2
+7.0    3
+6.0    4
+NaN   -2
+dtype: int64}])
+mapping {0: 0
+ 1   -1.405712
+ 2   -1.724166
+ 3    2.545531
+ 4    0.961411
+-1    0.000000
+-2    0.000000
+dtype: float64}
+encoder.mapping [{'col': 0, 'mapping': 5.0    1
+4.0    2
+7.0    3
+6.0    4
+NaN   -2
+dtype: int64, 'data_type': dtype('int64')}]
+encoder.cols [0]
+
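To make these two mappings concrete, here is a small hand-made re-computation of column 0 for the first rows. This is only a sketch built from the values printed above (not from any converter API); it reproduces the first column of the transform shown earlier.

value_to_code = {5: 1, 4: 2, 7: 3, 6: 4}   # ordinal_encoder.mapping for column 0
code_to_woe = {1: -1.405712, 2: -1.724166,
               3: 2.545531, 4: 0.961411}   # woe.mapping for column 0

# raw value -> ordinal code -> weight of evidence
for value in X[:5, 0]:
    code = value_to_code[int(value)]
    print(int(value), "->", code, "->", code_to_woe[code])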
+
+
+
+

Custom converter for OrdinalEncoder#

+

We start from the example Implement a new converter and then write the conversion.

+
def ordenc_to_sklearn(op_mapping):
+    "Converts OrdinalEncoder mapping to scikit-learn OrdinalEncoder."
+    cats = []
+    for column_map in op_mapping:
+        col = column_map['col']
+        while len(cats) <= col:
+            cats.append(None)
+        mapping = column_map['mapping']
+        res = []
+        for i in range(mapping.shape[0]):
+            if np.isnan(mapping.index[i]):
+                continue
+            ind = mapping.iloc[i]
+            while len(res) <= ind:
+                res.append(0)
+            res[ind] = mapping.index[i]
+        cats[col] = np.array(res, dtype=np.int64)
+
+    skl_ord = SklOrdinalEncoder(categories=cats, dtype=np.int64)
+    skl_ord.categories_ = cats
+    return skl_ord
+
+
+def ordinal_encoder_shape_calculator(operator):
+    check_input_and_output_numbers(
+        operator, input_count_range=1, output_count_range=1)
+    input_type = operator.inputs[0].type.__class__
+    input_dim = operator.inputs[0].get_first_dimension()
+    shape = operator.inputs[0].type.shape
+    second_dim = None if len(shape) != 2 else shape[1]
+    output_type = input_type([input_dim, second_dim])
+    operator.outputs[0].type = output_type
+
+
+def ordinal_encoder_converter(scope, operator, container):
+    op = operator.raw_operator
+    opv = container.target_opset
+    X = operator.inputs[0]
+
+    skl_ord = ordenc_to_sklearn(op.mapping)
+    cat = OnnxSubEstimator(skl_ord, X, op_version=opv,
+                           output_names=operator.outputs[:1])
+    cat.add_to(scope, container)
+
+
+update_registered_converter(
+    OrdinalEncoder, "CategoricalEncoderOrdinalEncoder",
+    ordinal_encoder_shape_calculator,
+    ordinal_encoder_converter)
+
+
+
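It can also help to look at what ordenc_to_sklearn builds. A quick sketch reusing the woe model fitted above (the helper only reads the mapping, so the call has no side effect):

# Sketch: the scikit-learn OrdinalEncoder rebuilt from the mapping inspected
# earlier; categories_ is indexed by the ordinal code, position 0 is a filler.
rebuilt = ordenc_to_sklearn(woe.ordinal_encoder.mapping)
print(rebuilt.categories_)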

Let’s compute the output on a short example.

+
enc = OrdinalEncoder(cols=[0, 1])
+enc.fit(X)
+print(enc.transform(X[:5]))
+
+
+
   0  1
+0  1  1
+1  2  1
+2  2  1
+3  2  1
+4  1  1
+
+
+

Let’s check the ONNX conversion produces the same results.

+
ord_onx = to_onnx(enc, X[:1], target_opset=14)
+sess = InferenceSession(ord_onx.SerializeToString())
+print(sess.run(None, {'X': X[:5]})[0])
+
+
+
[[1 1]
+ [2 1]
+ [2 1]
+ [2 1]
+ [1 1]]
+
+
+

That works.
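As an extra, optional check, the equality can be asserted programmatically instead of being read off the two printouts. A sketch reusing enc, sess and X from above:

# Optional sanity check (sketch): category_encoders returns a DataFrame,
# .values turns it into the integer array produced by onnxruntime.
expected = enc.transform(X[:5]).values
got = sess.run(None, {'X': X[:5]})[0]
np.testing.assert_array_equal(expected, got)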

+
+
+

Custom converter for WOEEncoder#

+

We start from the example Implement a new converter and then write the conversion.

+
def woeenc_to_sklearn(op_mapping):
+    "Converts WOEEncoder mapping to scikit-learn OrdinalEncoder."
+    cats = []
+    ws = []
+    for column_map in op_mapping.items():
+        col = column_map[0]
+        while len(cats) <= col:
+            cats.append('passthrough')
+            ws.append(None)
+        mapping = column_map[1]
+        intervals = []
+        weights = []
+        for i in range(mapping.shape[0]):
+            ind = mapping.index[i]
+            if ind < 0:
+                continue
+            intervals.append((float(ind - 1), float(ind), False, True))
+            weights.append(mapping.iloc[i])
+        cats[col] = intervals
+        ws[col] = weights
+
+    skl = WOETransformer(intervals=cats, weights=ws, onehot=False)
+    skl.fit(None)
+    return skl
+
+
+def woe_encoder_parser(
+        scope, model, inputs, custom_parsers=None):
+    if len(inputs) != 1:
+        raise RuntimeError(
+            "Unexpected number of inputs: %d != 1." % len(inputs))
+    if inputs[0].type is None:
+        raise RuntimeError(
+            "Unexpected type: %r." % (inputs[0], ))
+    alias = get_model_alias(type(model))
+    this_operator = scope.declare_local_operator(alias, model)
+    this_operator.inputs.append(inputs[0])
+    this_operator.outputs.append(
+        scope.declare_local_variable('catwoe', FloatTensorType()))
+    return this_operator.outputs
+
+
+def woe_encoder_shape_calculator(operator):
+    check_input_and_output_numbers(
+        operator, input_count_range=1, output_count_range=1)
+    input_dim = operator.inputs[0].get_first_dimension()
+    shape = operator.inputs[0].type.shape
+    second_dim = None if len(shape) != 2 else shape[1]
+    output_type = FloatTensorType([input_dim, second_dim])
+    operator.outputs[0].type = output_type
+
+
+def woe_encoder_converter(scope, operator, container):
+    op = operator.raw_operator
+    opv = container.target_opset
+    X = operator.inputs[0]
+
+    sub = OnnxSubEstimator(op.ordinal_encoder, X,
+                           op_version=opv)
+    cast = OnnxCast(sub, op_version=opv, to=np.float32)
+    skl_ord = woeenc_to_sklearn(op.mapping)
+    cat = OnnxSubEstimator(skl_ord, cast, op_version=opv,
+                           output_names=operator.outputs[:1],
+                           input_types=[FloatTensorType()])
+    cat.add_to(scope, container)
+
+
+update_registered_converter(
+    WOEEncoder, "CategoricalEncoderWOEEncoder",
+    woe_encoder_shape_calculator,
+    woe_encoder_converter,
+    parser=woe_encoder_parser)
+
+
+
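The helper woeenc_to_sklearn maps every ordinal code i to the interval (i-1, i], so that, once the ordinal output is cast to float, each code falls into exactly one interval and receives the corresponding weight. A small illustration with the column 0 weights printed earlier (a plain-Python sketch, independent of the WOETransformer API):

# Sketch: how a single ordinal code is turned into a weight through the
# intervals built by woeenc_to_sklearn (weights from the mapping above).
intervals = [(0.0, 1.0), (1.0, 2.0), (2.0, 3.0), (3.0, 4.0)]   # read as (low, high]
weights = [-1.405712, -1.724166, 2.545531, 0.961411]

def woe_of(code):
    for (low, high), w in zip(intervals, weights):
        if low < code <= high:
            return w
    return 0.0   # codes -1/-2 (unknown or missing) keep a zero weight

print(woe_of(1.0), woe_of(2.0))   # -1.405712 -1.724166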

Let’s compute the output on a short example.

+
woe = WOEEncoder(cols=[0, 1]).fit(X, y)
+print(woe.transform(X[:5]))
+
+
+
          0         1
+0 -1.405712 -0.035947
+1 -1.724166 -0.035947
+2 -1.724166 -0.035947
+3 -1.724166 -0.035947
+4 -1.405712 -0.035947
+
+
+

Let’s check the ONNX conversion produces the same results.

+
woe_onx = to_onnx(woe, X[:1], target_opset=14)
+sess = InferenceSession(woe_onx.SerializeToString())
+print(sess.run(None, {'X': X[:5]})[0])
+
+
+
[[-1.4057125  -0.03594739]
+ [-1.7241662  -0.03594739]
+ [-1.7241662  -0.03594739]
+ [-1.7241662  -0.03594739]
+ [-1.4057125  -0.03594739]]
+
+
+
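To see how the parser, the shape calculator and the converter fit together, one can also peek at the generated graph. This is only a sketch; the exact list of operators depends on the skl2onnx and opset versions:

# Sketch: the graph output comes from the variable declared by the custom
# parser ('catwoe', possibly suffixed to stay unique) and is a float tensor;
# the nodes come from the two sub-estimators plus the Cast added in between.
print("inputs :", [i.name for i in woe_onx.graph.input])
print("outputs:", [o.name for o in woe_onx.graph.output])
print("nodes  :", [n.op_type for n in woe_onx.graph.node])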

Total running time of the script: ( 0 minutes 0.173 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+
+ \ No newline at end of file diff --git a/auto_tutorial/plot_cbegin_opset.html b/auto_tutorial/plot_cbegin_opset.html index a36f869c1..358a21722 100644 --- a/auto_tutorial/plot_cbegin_opset.html +++ b/auto_tutorial/plot_cbegin_opset.html @@ -1,5166 +1,4826 @@ - What is the opset number? — sklearn-onnx 1.11.2 documentation
-

What is the opset number?#

-

Every library is versioned. scikit-learn may change the implementation of a specific model; that happens for example with the SVC model, where the parameter break_ties was added in 0.22. ONNX also has a version, called the opset number. Operator ArgMin was added in opset 1 and changed in opsets 11, 12 and 13. Sometimes an operator is updated to extend the list of types it supports, sometimes a parameter is moved into the input list. The runtime used to deploy the model may not implement the newest version; in that case, the model must be converted using the most recent opset supported by the runtime, which we call the targeted opset. An ONNX graph only declares one opset per domain, and every node must be described following the specification defined by the latest opset at or below the targeted opset.
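The most recent opset known to the installed onnx package can be queried directly; a minimal sketch using onnx_opset_version, which this example imports below (the targeted opset passed to the converter must not exceed what the deployment runtime implements):

from onnx.defs import onnx_opset_version
print("latest opset of the installed onnx package:", onnx_opset_version())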

-

This example considers an IsolationForest and digs into opsets.

- -
-

Data#

-

A simple example.

-
from onnx.defs import onnx_opset_version
-from skl2onnx import to_onnx
-import numpy
-import matplotlib.pyplot as plt
-from sklearn.ensemble import IsolationForest
-from sklearn.datasets import make_blobs
-
-X, y = make_blobs(n_samples=100, n_features=2)
-
-model = IsolationForest(n_estimators=3)
-model.fit(X)
-labels = model.predict(X)
-
-fig, ax = plt.subplots(1, 1)
-for k in (-1, 1):
-    ax.plot(X[labels == k, 0], X[labels == k, 1], 'o', label="cl%d" % k)
-ax.set_title("Sample")
-
-
-Sample
-
-

ONNX#

-
onx = to_onnx(model, X[:1].astype(numpy.float32),
-              target_opset={'': 15, 'ai.onnx.ml': 2})
-print(onx)
-
-
-

Out:

-
ir_version: 8
-producer_name: "skl2onnx"
-producer_version: "1.11.2"
-domain: "ai.onnx"
-model_version: 0
-doc_string: ""
-graph {
-  node {
-    input: "X"
-    input: "node_sample0_Gathercst"
-    output: "node_sample0_output0"
-    name: "node_sample0_Gather"
-    op_type: "Gather"
-    attribute {
-      name: "axis"
-      i: 1
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "node_sample0_output0"
-    output: "node_sample0_Y0"
-    name: "node_sample0_TreeEnsembleRegressor"
-    op_type: "TreeEnsembleRegressor"
-    attribute {
-      name: "n_targets"
-      i: 1
-      type: INT
-    }
-    attribute {
-      name: "nodes_falsenodeids"
-      ints: 54
-      ints: 25
-      ints: 6
-      ints: 5
-      ints: 0
-      ints: 0
-      ints: 16
-      ints: 15
-      ints: 12
-      ints: 11
-      ints: 0
-      ints: 0
-      ints: 14
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 18
-      ints: 0
-      ints: 22
-      ints: 21
-      ints: 0
-      ints: 0
-      ints: 24
-      ints: 0
-      ints: 0
-      ints: 39
-      ints: 30
-      ints: 29
-      ints: 0
-      ints: 0
-      ints: 32
-      ints: 0
-      ints: 36
-      ints: 35
-      ints: 0
-      ints: 0
-      ints: 38
-      ints: 0
-      ints: 0
-      ints: 47
-      ints: 46
-      ints: 45
-      ints: 44
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 49
-      ints: 0
-      ints: 53
-      ints: 52
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 56
-      ints: 0
-      ints: 72
-      ints: 67
-      ints: 62
-      ints: 61
-      ints: 0
-      ints: 0
-      ints: 66
-      ints: 65
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 71
-      ints: 70
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_featureids"
-      ints: 0
-      ints: 1
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_hitrates"
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      type: FLOATS
-    }
-    attribute {
-      name: "nodes_missing_value_tracks_true"
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_modes"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      type: STRINGS
-    }
-    attribute {
-      name: "nodes_nodeids"
-      ints: 0
-      ints: 1
-      ints: 2
-      ints: 3
-      ints: 4
-      ints: 5
-      ints: 6
-      ints: 7
-      ints: 8
-      ints: 9
-      ints: 10
-      ints: 11
-      ints: 12
-      ints: 13
-      ints: 14
-      ints: 15
-      ints: 16
-      ints: 17
-      ints: 18
-      ints: 19
-      ints: 20
-      ints: 21
-      ints: 22
-      ints: 23
-      ints: 24
-      ints: 25
-      ints: 26
-      ints: 27
-      ints: 28
-      ints: 29
-      ints: 30
-      ints: 31
-      ints: 32
-      ints: 33
-      ints: 34
-      ints: 35
-      ints: 36
-      ints: 37
-      ints: 38
-      ints: 39
-      ints: 40
-      ints: 41
-      ints: 42
-      ints: 43
-      ints: 44
-      ints: 45
-      ints: 46
-      ints: 47
-      ints: 48
-      ints: 49
-      ints: 50
-      ints: 51
-      ints: 52
-      ints: 53
-      ints: 54
-      ints: 55
-      ints: 56
-      ints: 57
-      ints: 58
-      ints: 59
-      ints: 60
-      ints: 61
-      ints: 62
-      ints: 63
-      ints: 64
-      ints: 65
-      ints: 66
-      ints: 67
-      ints: 68
-      ints: 69
-      ints: 70
-      ints: 71
-      ints: 72
-      type: INTS
-    }
-    attribute {
-      name: "nodes_treeids"
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_truenodeids"
-      ints: 1
-      ints: 2
-      ints: 3
-      ints: 4
-      ints: 0
-      ints: 0
-      ints: 7
-      ints: 8
-      ints: 9
-      ints: 10
-      ints: 0
-      ints: 0
-      ints: 13
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 17
-      ints: 0
-      ints: 19
-      ints: 20
-      ints: 0
-      ints: 0
-      ints: 23
-      ints: 0
-      ints: 0
-      ints: 26
-      ints: 27
-      ints: 28
-      ints: 0
-      ints: 0
-      ints: 31
-      ints: 0
-      ints: 33
-      ints: 34
-      ints: 0
-      ints: 0
-      ints: 37
-      ints: 0
-      ints: 0
-      ints: 40
-      ints: 41
-      ints: 42
-      ints: 43
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 48
-      ints: 0
-      ints: 50
-      ints: 51
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 55
-      ints: 0
-      ints: 57
-      ints: 58
-      ints: 59
-      ints: 60
-      ints: 0
-      ints: 0
-      ints: 63
-      ints: 64
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 68
-      ints: 69
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_values"
-      floats: 10.021278381347656
-      floats: 3.7887494564056396
-      floats: -7.121064186096191
-      floats: 4.416437149047852
-      floats: 0.0
-      floats: 0.0
-      floats: -1.7588245868682861
-      floats: 7.825988292694092
-      floats: 4.649615287780762
-      floats: 3.2951412200927734
-      floats: 0.0
-      floats: 0.0
-      floats: 6.6710991859436035
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 4.765402317047119
-      floats: 0.0
-      floats: 6.541184425354004
-      floats: -1.2423985004425049
-      floats: 0.0
-      floats: 0.0
-      floats: 7.697707176208496
-      floats: 0.0
-      floats: 0.0
-      floats: 9.153670310974121
-      floats: 6.013628005981445
-      floats: 8.105496406555176
-      floats: 0.0
-      floats: 0.0
-      floats: 7.426864147186279
-      floats: 0.0
-      floats: 8.715617179870605
-      floats: 8.431848526000977
-      floats: 0.0
-      floats: 0.0
-      floats: 7.730372905731201
-      floats: 0.0
-      floats: 0.0
-      floats: 6.437074661254883
-      floats: 9.96568489074707
-      floats: 9.521824836730957
-      floats: 9.370474815368652
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 9.491046905517578
-      floats: 0.0
-      floats: 8.651312828063965
-      floats: 9.867877006530762
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 5.1377949714660645
-      floats: 0.0
-      floats: 11.692091941833496
-      floats: 10.630556106567383
-      floats: 10.18297004699707
-      floats: 8.508511543273926
-      floats: 0.0
-      floats: 0.0
-      floats: 7.681608200073242
-      floats: 10.399255752563477
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 8.249090194702148
-      floats: 11.212265014648438
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      type: FLOATS
-    }
-    attribute {
-      name: "post_transform"
-      s: "NONE"
-      type: STRING
-    }
-    attribute {
-      name: "target_ids"
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "target_nodeids"
-      ints: 4
-      ints: 5
-      ints: 10
-      ints: 11
-      ints: 13
-      ints: 14
-      ints: 15
-      ints: 17
-      ints: 20
-      ints: 21
-      ints: 23
-      ints: 24
-      ints: 28
-      ints: 29
-      ints: 31
-      ints: 34
-      ints: 35
-      ints: 37
-      ints: 38
-      ints: 43
-      ints: 44
-      ints: 45
-      ints: 46
-      ints: 48
-      ints: 51
-      ints: 52
-      ints: 53
-      ints: 55
-      ints: 60
-      ints: 61
-      ints: 64
-      ints: 65
-      ints: 66
-      ints: 69
-      ints: 70
-      ints: 71
-      ints: 72
-      type: INTS
-    }
-    attribute {
-      name: "target_treeids"
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "target_weights"
-      floats: 4.0
-      floats: 5.0
-      floats: 10.0
-      floats: 11.0
-      floats: 13.0
-      floats: 14.0
-      floats: 15.0
-      floats: 17.0
-      floats: 20.0
-      floats: 21.0
-      floats: 23.0
-      floats: 24.0
-      floats: 28.0
-      floats: 29.0
-      floats: 31.0
-      floats: 34.0
-      floats: 35.0
-      floats: 37.0
-      floats: 38.0
-      floats: 43.0
-      floats: 44.0
-      floats: 45.0
-      floats: 46.0
-      floats: 48.0
-      floats: 51.0
-      floats: 52.0
-      floats: 53.0
-      floats: 55.0
-      floats: 60.0
-      floats: 61.0
-      floats: 64.0
-      floats: 65.0
-      floats: 66.0
-      floats: 69.0
-      floats: 70.0
-      floats: 71.0
-      floats: 72.0
-      type: FLOATS
-    }
-    domain: "ai.onnx.ml"
-  }
-  node {
-    input: "X"
-    input: "node_sample0_Gathercst"
-    output: "node_sample1_output0"
-    name: "node_sample1_Gather"
-    op_type: "Gather"
-    attribute {
-      name: "axis"
-      i: 1
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "X"
-    input: "node_sample0_Gathercst"
-    output: "node_sample2_output0"
-    name: "node_sample2_Gather"
-    op_type: "Gather"
-    attribute {
-      name: "axis"
-      i: 1
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "node_sample0_Y0"
-    output: "node_sample0_output02"
-    name: "node_sample0_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 7
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "node_sample0_Y0"
-    output: "path_length0_output0"
-    name: "path_length0_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 7
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "node_sample1_output0"
-    output: "node_sample1_Y0"
-    name: "node_sample1_TreeEnsembleRegressor"
-    op_type: "TreeEnsembleRegressor"
-    attribute {
-      name: "n_targets"
-      i: 1
-      type: INT
-    }
-    attribute {
-      name: "nodes_falsenodeids"
-      ints: 30
-      ints: 29
-      ints: 6
-      ints: 5
-      ints: 0
-      ints: 0
-      ints: 16
-      ints: 9
-      ints: 0
-      ints: 13
-      ints: 12
-      ints: 0
-      ints: 0
-      ints: 15
-      ints: 0
-      ints: 0
-      ints: 24
-      ints: 21
-      ints: 20
-      ints: 0
-      ints: 0
-      ints: 23
-      ints: 0
-      ints: 0
-      ints: 28
-      ints: 27
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 48
-      ints: 33
-      ints: 0
-      ints: 43
-      ints: 36
-      ints: 0
-      ints: 40
-      ints: 39
-      ints: 0
-      ints: 0
-      ints: 42
-      ints: 0
-      ints: 0
-      ints: 45
-      ints: 0
-      ints: 47
-      ints: 0
-      ints: 0
-      ints: 66
-      ints: 63
-      ints: 58
-      ints: 55
-      ints: 54
-      ints: 0
-      ints: 0
-      ints: 57
-      ints: 0
-      ints: 0
-      ints: 60
-      ints: 0
-      ints: 62
-      ints: 0
-      ints: 0
-      ints: 65
-      ints: 0
-      ints: 0
-      ints: 68
-      ints: 0
-      ints: 72
-      ints: 71
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_featureids"
-      ints: 1
-      ints: 1
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 1
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 1
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_hitrates"
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      type: FLOATS
-    }
-    attribute {
-      name: "nodes_missing_value_tracks_true"
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_modes"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      type: STRINGS
-    }
-    attribute {
-      name: "nodes_nodeids"
-      ints: 0
-      ints: 1
-      ints: 2
-      ints: 3
-      ints: 4
-      ints: 5
-      ints: 6
-      ints: 7
-      ints: 8
-      ints: 9
-      ints: 10
-      ints: 11
-      ints: 12
-      ints: 13
-      ints: 14
-      ints: 15
-      ints: 16
-      ints: 17
-      ints: 18
-      ints: 19
-      ints: 20
-      ints: 21
-      ints: 22
-      ints: 23
-      ints: 24
-      ints: 25
-      ints: 26
-      ints: 27
-      ints: 28
-      ints: 29
-      ints: 30
-      ints: 31
-      ints: 32
-      ints: 33
-      ints: 34
-      ints: 35
-      ints: 36
-      ints: 37
-      ints: 38
-      ints: 39
-      ints: 40
-      ints: 41
-      ints: 42
-      ints: 43
-      ints: 44
-      ints: 45
-      ints: 46
-      ints: 47
-      ints: 48
-      ints: 49
-      ints: 50
-      ints: 51
-      ints: 52
-      ints: 53
-      ints: 54
-      ints: 55
-      ints: 56
-      ints: 57
-      ints: 58
-      ints: 59
-      ints: 60
-      ints: 61
-      ints: 62
-      ints: 63
-      ints: 64
-      ints: 65
-      ints: 66
-      ints: 67
-      ints: 68
-      ints: 69
-      ints: 70
-      ints: 71
-      ints: 72
-      type: INTS
-    }
-    attribute {
-      name: "nodes_treeids"
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_truenodeids"
-      ints: 1
-      ints: 2
-      ints: 3
-      ints: 4
-      ints: 0
-      ints: 0
-      ints: 7
-      ints: 8
-      ints: 0
-      ints: 10
-      ints: 11
-      ints: 0
-      ints: 0
-      ints: 14
-      ints: 0
-      ints: 0
-      ints: 17
-      ints: 18
-      ints: 19
-      ints: 0
-      ints: 0
-      ints: 22
-      ints: 0
-      ints: 0
-      ints: 25
-      ints: 26
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 31
-      ints: 32
-      ints: 0
-      ints: 34
-      ints: 35
-      ints: 0
-      ints: 37
-      ints: 38
-      ints: 0
-      ints: 0
-      ints: 41
-      ints: 0
-      ints: 0
-      ints: 44
-      ints: 0
-      ints: 46
-      ints: 0
-      ints: 0
-      ints: 49
-      ints: 50
-      ints: 51
-      ints: 52
-      ints: 53
-      ints: 0
-      ints: 0
-      ints: 56
-      ints: 0
-      ints: 0
-      ints: 59
-      ints: 0
-      ints: 61
-      ints: 0
-      ints: 0
-      ints: 64
-      ints: 0
-      ints: 0
-      ints: 67
-      ints: 0
-      ints: 69
-      ints: 70
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_values"
-      floats: 3.8512446880340576
-      floats: -0.5676265358924866
-      floats: -6.980898380279541
-      floats: 4.714104175567627
-      floats: 0.0
-      floats: 0.0
-      floats: -5.093116760253906
-      floats: -6.655633926391602
-      floats: 0.0
-      floats: 2.331597328186035
-      floats: -5.791065692901611
-      floats: 0.0
-      floats: 0.0
-      floats: -5.753363609313965
-      floats: 0.0
-      floats: 0.0
-      floats: -3.8087997436523438
-      floats: 5.919134140014648
-      floats: 2.3382115364074707
-      floats: 0.0
-      floats: 0.0
-      floats: 6.681684494018555
-      floats: 0.0
-      floats: 0.0
-      floats: -1.0638467073440552
-      floats: 5.241893768310547
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 9.605061531066895
-      floats: 7.560668468475342
-      floats: 0.0
-      floats: 8.219744682312012
-      floats: 5.37798547744751
-      floats: 0.0
-      floats: 7.676764011383057
-      floats: 8.128286361694336
-      floats: 0.0
-      floats: 0.0
-      floats: 9.215782165527344
-      floats: 0.0
-      floats: 0.0
-      floats: 8.422836303710938
-      floats: 0.0
-      floats: 8.757240295410156
-      floats: 0.0
-      floats: 0.0
-      floats: 10.749845504760742
-      floats: 8.670648574829102
-      floats: 10.19068717956543
-      floats: 7.231447219848633
-      floats: 5.572278022766113
-      floats: 0.0
-      floats: 0.0
-      floats: 9.892742156982422
-      floats: 0.0
-      floats: 0.0
-      floats: 5.543532371520996
-      floats: 0.0
-      floats: 8.146027565002441
-      floats: 0.0
-      floats: 0.0
-      floats: 10.089201927185059
-      floats: 0.0
-      floats: 0.0
-      floats: 7.585289001464844
-      floats: 0.0
-      floats: 11.574915885925293
-      floats: 11.187359809875488
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      type: FLOATS
-    }
-    attribute {
-      name: "post_transform"
-      s: "NONE"
-      type: STRING
-    }
-    attribute {
-      name: "target_ids"
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "target_nodeids"
-      ints: 4
-      ints: 5
-      ints: 8
-      ints: 11
-      ints: 12
-      ints: 14
-      ints: 15
-      ints: 19
-      ints: 20
-      ints: 22
-      ints: 23
-      ints: 26
-      ints: 27
-      ints: 28
-      ints: 29
-      ints: 32
-      ints: 35
-      ints: 38
-      ints: 39
-      ints: 41
-      ints: 42
-      ints: 44
-      ints: 46
-      ints: 47
-      ints: 53
-      ints: 54
-      ints: 56
-      ints: 57
-      ints: 59
-      ints: 61
-      ints: 62
-      ints: 64
-      ints: 65
-      ints: 67
-      ints: 70
-      ints: 71
-      ints: 72
-      type: INTS
-    }
-    attribute {
-      name: "target_treeids"
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "target_weights"
-      floats: 4.0
-      floats: 5.0
-      floats: 8.0
-      floats: 11.0
-      floats: 12.0
-      floats: 14.0
-      floats: 15.0
-      floats: 19.0
-      floats: 20.0
-      floats: 22.0
-      floats: 23.0
-      floats: 26.0
-      floats: 27.0
-      floats: 28.0
-      floats: 29.0
-      floats: 32.0
-      floats: 35.0
-      floats: 38.0
-      floats: 39.0
-      floats: 41.0
-      floats: 42.0
-      floats: 44.0
-      floats: 46.0
-      floats: 47.0
-      floats: 53.0
-      floats: 54.0
-      floats: 56.0
-      floats: 57.0
-      floats: 59.0
-      floats: 61.0
-      floats: 62.0
-      floats: 64.0
-      floats: 65.0
-      floats: 67.0
-      floats: 70.0
-      floats: 71.0
-      floats: 72.0
-      type: FLOATS
-    }
-    domain: "ai.onnx.ml"
-  }
-  node {
-    input: "node_sample2_output0"
-    output: "node_sample2_Y0"
-    name: "node_sample2_TreeEnsembleRegressor"
-    op_type: "TreeEnsembleRegressor"
-    attribute {
-      name: "n_targets"
-      i: 1
-      type: INT
-    }
-    attribute {
-      name: "nodes_falsenodeids"
-      ints: 56
-      ints: 41
-      ints: 22
-      ints: 19
-      ints: 12
-      ints: 9
-      ints: 8
-      ints: 0
-      ints: 0
-      ints: 11
-      ints: 0
-      ints: 0
-      ints: 16
-      ints: 15
-      ints: 0
-      ints: 0
-      ints: 18
-      ints: 0
-      ints: 0
-      ints: 21
-      ints: 0
-      ints: 0
-      ints: 38
-      ints: 31
-      ints: 28
-      ints: 27
-      ints: 0
-      ints: 0
-      ints: 30
-      ints: 0
-      ints: 0
-      ints: 35
-      ints: 34
-      ints: 0
-      ints: 0
-      ints: 37
-      ints: 0
-      ints: 0
-      ints: 40
-      ints: 0
-      ints: 0
-      ints: 55
-      ints: 52
-      ints: 49
-      ints: 48
-      ints: 47
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 51
-      ints: 0
-      ints: 0
-      ints: 54
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 68
-      ints: 67
-      ints: 60
-      ints: 0
-      ints: 66
-      ints: 65
-      ints: 64
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 84
-      ints: 83
-      ints: 78
-      ints: 75
-      ints: 74
-      ints: 0
-      ints: 0
-      ints: 77
-      ints: 0
-      ints: 0
-      ints: 82
-      ints: 81
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 92
-      ints: 91
-      ints: 88
-      ints: 0
-      ints: 90
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 98
-      ints: 97
-      ints: 96
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 102
-      ints: 101
-      ints: 0
-      ints: 0
-      ints: 104
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_featureids"
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 1
-      ints: 1
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 1
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 1
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 1
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_hitrates"
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      type: FLOATS
-    }
-    attribute {
-      name: "nodes_missing_value_tracks_true"
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_modes"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      strings: "BRANCH_LEQ"
-      strings: "LEAF"
-      strings: "LEAF"
-      type: STRINGS
-    }
-    attribute {
-      name: "nodes_nodeids"
-      ints: 0
-      ints: 1
-      ints: 2
-      ints: 3
-      ints: 4
-      ints: 5
-      ints: 6
-      ints: 7
-      ints: 8
-      ints: 9
-      ints: 10
-      ints: 11
-      ints: 12
-      ints: 13
-      ints: 14
-      ints: 15
-      ints: 16
-      ints: 17
-      ints: 18
-      ints: 19
-      ints: 20
-      ints: 21
-      ints: 22
-      ints: 23
-      ints: 24
-      ints: 25
-      ints: 26
-      ints: 27
-      ints: 28
-      ints: 29
-      ints: 30
-      ints: 31
-      ints: 32
-      ints: 33
-      ints: 34
-      ints: 35
-      ints: 36
-      ints: 37
-      ints: 38
-      ints: 39
-      ints: 40
-      ints: 41
-      ints: 42
-      ints: 43
-      ints: 44
-      ints: 45
-      ints: 46
-      ints: 47
-      ints: 48
-      ints: 49
-      ints: 50
-      ints: 51
-      ints: 52
-      ints: 53
-      ints: 54
-      ints: 55
-      ints: 56
-      ints: 57
-      ints: 58
-      ints: 59
-      ints: 60
-      ints: 61
-      ints: 62
-      ints: 63
-      ints: 64
-      ints: 65
-      ints: 66
-      ints: 67
-      ints: 68
-      ints: 69
-      ints: 70
-      ints: 71
-      ints: 72
-      ints: 73
-      ints: 74
-      ints: 75
-      ints: 76
-      ints: 77
-      ints: 78
-      ints: 79
-      ints: 80
-      ints: 81
-      ints: 82
-      ints: 83
-      ints: 84
-      ints: 85
-      ints: 86
-      ints: 87
-      ints: 88
-      ints: 89
-      ints: 90
-      ints: 91
-      ints: 92
-      ints: 93
-      ints: 94
-      ints: 95
-      ints: 96
-      ints: 97
-      ints: 98
-      ints: 99
-      ints: 100
-      ints: 101
-      ints: 102
-      ints: 103
-      ints: 104
-      type: INTS
-    }
-    attribute {
-      name: "nodes_treeids"
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_truenodeids"
-      ints: 1
-      ints: 2
-      ints: 3
-      ints: 4
-      ints: 5
-      ints: 6
-      ints: 7
-      ints: 0
-      ints: 0
-      ints: 10
-      ints: 0
-      ints: 0
-      ints: 13
-      ints: 14
-      ints: 0
-      ints: 0
-      ints: 17
-      ints: 0
-      ints: 0
-      ints: 20
-      ints: 0
-      ints: 0
-      ints: 23
-      ints: 24
-      ints: 25
-      ints: 26
-      ints: 0
-      ints: 0
-      ints: 29
-      ints: 0
-      ints: 0
-      ints: 32
-      ints: 33
-      ints: 0
-      ints: 0
-      ints: 36
-      ints: 0
-      ints: 0
-      ints: 39
-      ints: 0
-      ints: 0
-      ints: 42
-      ints: 43
-      ints: 44
-      ints: 45
-      ints: 46
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 50
-      ints: 0
-      ints: 0
-      ints: 53
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 57
-      ints: 58
-      ints: 59
-      ints: 0
-      ints: 61
-      ints: 62
-      ints: 63
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 69
-      ints: 70
-      ints: 71
-      ints: 72
-      ints: 73
-      ints: 0
-      ints: 0
-      ints: 76
-      ints: 0
-      ints: 0
-      ints: 79
-      ints: 80
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 85
-      ints: 86
-      ints: 87
-      ints: 0
-      ints: 89
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 93
-      ints: 94
-      ints: 95
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 99
-      ints: 100
-      ints: 0
-      ints: 0
-      ints: 103
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "nodes_values"
-      floats: -1.9907585382461548
-      floats: 6.839632511138916
-      floats: 3.6026360988616943
-      floats: 3.51365327835083
-      floats: 2.635514974594116
-      floats: 2.1891138553619385
-      floats: 2.092236280441284
-      floats: 0.0
-      floats: 0.0
-      floats: 2.345102310180664
-      floats: 0.0
-      floats: 0.0
-      floats: 3.0827603340148926
-      floats: 2.872518539428711
-      floats: 0.0
-      floats: 0.0
-      floats: -4.910643100738525
-      floats: 0.0
-      floats: 0.0
-      floats: 3.52667498588562
-      floats: 0.0
-      floats: 0.0
-      floats: -2.4622914791107178
-      floats: -6.068970680236816
-      floats: -6.634273052215576
-      floats: -6.811461448669434
-      floats: 0.0
-      floats: 0.0
-      floats: -6.576728343963623
-      floats: 0.0
-      floats: 0.0
-      floats: -3.355783224105835
-      floats: -4.942788600921631
-      floats: 0.0
-      floats: 0.0
-      floats: 6.617758750915527
-      floats: 0.0
-      floats: 0.0
-      floats: 5.890446186065674
-      floats: 0.0
-      floats: 0.0
-      floats: -2.2357113361358643
-      floats: 7.418643474578857
-      floats: 7.022924423217773
-      floats: -2.456209421157837
-      floats: -2.7438905239105225
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 7.2257399559021
-      floats: 0.0
-      floats: 0.0
-      floats: 7.698291778564453
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 0.8175256252288818
-      floats: -0.7507614493370056
-      floats: 4.318005084991455
-      floats: 0.0
-      floats: 6.969293594360352
-      floats: -1.5351403951644897
-      floats: -1.659111738204956
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 9.485491752624512
-      floats: 8.976116180419922
-      floats: 8.002398490905762
-      floats: 6.623079299926758
-      floats: 9.125885009765625
-      floats: 0.0
-      floats: 0.0
-      floats: 7.444906711578369
-      floats: 0.0
-      floats: 0.0
-      floats: 8.453536033630371
-      floats: 8.412899017333984
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 7.705773830413818
-      floats: 7.476274013519287
-      floats: 4.589108467102051
-      floats: 0.0
-      floats: 10.508894920349121
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 8.615400314331055
-      floats: 10.795989990234375
-      floats: 8.093504905700684
-      floats: 0.0
-      floats: 0.0
-      floats: 0.0
-      floats: 8.830120086669922
-      floats: 10.28431224822998
-      floats: 0.0
-      floats: 0.0
-      floats: 8.931217193603516
-      floats: 0.0
-      floats: 0.0
-      type: FLOATS
-    }
-    attribute {
-      name: "post_transform"
-      s: "NONE"
-      type: STRING
-    }
-    attribute {
-      name: "target_ids"
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "target_nodeids"
-      ints: 7
-      ints: 8
-      ints: 10
-      ints: 11
-      ints: 14
-      ints: 15
-      ints: 17
-      ints: 18
-      ints: 20
-      ints: 21
-      ints: 26
-      ints: 27
-      ints: 29
-      ints: 30
-      ints: 33
-      ints: 34
-      ints: 36
-      ints: 37
-      ints: 39
-      ints: 40
-      ints: 46
-      ints: 47
-      ints: 48
-      ints: 50
-      ints: 51
-      ints: 53
-      ints: 54
-      ints: 55
-      ints: 59
-      ints: 63
-      ints: 64
-      ints: 65
-      ints: 66
-      ints: 67
-      ints: 73
-      ints: 74
-      ints: 76
-      ints: 77
-      ints: 80
-      ints: 81
-      ints: 82
-      ints: 83
-      ints: 87
-      ints: 89
-      ints: 90
-      ints: 91
-      ints: 95
-      ints: 96
-      ints: 97
-      ints: 100
-      ints: 101
-      ints: 103
-      ints: 104
-      type: INTS
-    }
-    attribute {
-      name: "target_treeids"
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      ints: 0
-      type: INTS
-    }
-    attribute {
-      name: "target_weights"
-      floats: 7.0
-      floats: 8.0
-      floats: 10.0
-      floats: 11.0
-      floats: 14.0
-      floats: 15.0
-      floats: 17.0
-      floats: 18.0
-      floats: 20.0
-      floats: 21.0
-      floats: 26.0
-      floats: 27.0
-      floats: 29.0
-      floats: 30.0
-      floats: 33.0
-      floats: 34.0
-      floats: 36.0
-      floats: 37.0
-      floats: 39.0
-      floats: 40.0
-      floats: 46.0
-      floats: 47.0
-      floats: 48.0
-      floats: 50.0
-      floats: 51.0
-      floats: 53.0
-      floats: 54.0
-      floats: 55.0
-      floats: 59.0
-      floats: 63.0
-      floats: 64.0
-      floats: 65.0
-      floats: 66.0
-      floats: 67.0
-      floats: 73.0
-      floats: 74.0
-      floats: 76.0
-      floats: 77.0
-      floats: 80.0
-      floats: 81.0
-      floats: 82.0
-      floats: 83.0
-      floats: 87.0
-      floats: 89.0
-      floats: 90.0
-      floats: 91.0
-      floats: 95.0
-      floats: 96.0
-      floats: 97.0
-      floats: 100.0
-      floats: 101.0
-      floats: 103.0
-      floats: 104.0
-      type: FLOATS
-    }
-    domain: "ai.onnx.ml"
-  }
-  node {
-    input: "path_length0_output0"
-    output: "path_length0_Y0"
-    name: "path_length0_LabelEncoder"
-    op_type: "LabelEncoder"
-    attribute {
-      name: "keys_int64s"
-      ints: 4
-      ints: 5
-      ints: 10
-      ints: 11
-      ints: 13
-      ints: 14
-      ints: 15
-      ints: 17
-      ints: 20
-      ints: 21
-      ints: 23
-      ints: 24
-      ints: 28
-      ints: 29
-      ints: 31
-      ints: 34
-      ints: 35
-      ints: 37
-      ints: 38
-      ints: 43
-      ints: 44
-      ints: 45
-      ints: 46
-      ints: 48
-      ints: 51
-      ints: 52
-      ints: 53
-      ints: 55
-      ints: 60
-      ints: 61
-      ints: 64
-      ints: 65
-      ints: 66
-      ints: 69
-      ints: 70
-      ints: 71
-      ints: 72
-      type: INTS
-    }
-    attribute {
-      name: "values_floats"
-      floats: 5.0
-      floats: 5.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 6.0
-      floats: 6.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 6.0
-      floats: 6.0
-      floats: 6.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 7.0
-      floats: 6.0
-      floats: 6.0
-      floats: 8.0
-      floats: 8.0
-      floats: 7.0
-      floats: 3.0
-      floats: 7.0
-      floats: 7.0
-      floats: 8.0
-      floats: 8.0
-      floats: 7.0
-      floats: 7.0
-      floats: 7.0
-      floats: 6.0
-      floats: 4.0
-      type: FLOATS
-    }
-    domain: "ai.onnx.ml"
-  }
-  node {
-    input: "node_sample0_output02"
-    output: "node_sample0_Y02"
-    name: "node_sample0_LabelEncoder"
-    op_type: "LabelEncoder"
-    attribute {
-      name: "keys_int64s"
-      ints: 4
-      ints: 5
-      ints: 10
-      ints: 11
-      ints: 13
-      ints: 14
-      ints: 15
-      ints: 17
-      ints: 20
-      ints: 21
-      ints: 23
-      ints: 24
-      ints: 28
-      ints: 29
-      ints: 31
-      ints: 34
-      ints: 35
-      ints: 37
-      ints: 38
-      ints: 43
-      ints: 44
-      ints: 45
-      ints: 46
-      ints: 48
-      ints: 51
-      ints: 52
-      ints: 53
-      ints: 55
-      ints: 60
-      ints: 61
-      ints: 64
-      ints: 65
-      ints: 66
-      ints: 69
-      ints: 70
-      ints: 71
-      ints: 72
-      type: INTS
-    }
-    attribute {
-      name: "values_floats"
-      floats: 1.0
-      floats: 1.0
-      floats: 12.0
-      floats: 15.0
-      floats: 21.0
-      floats: 10.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 7.0
-      floats: 1.0
-      floats: 3.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 2.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      type: FLOATS
-    }
-    domain: "ai.onnx.ml"
-  }
-  node {
-    input: "node_sample1_Y0"
-    output: "path_length1_output0"
-    name: "path_length1_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 7
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "node_sample1_Y0"
-    output: "node_sample1_output02"
-    name: "node_sample1_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 7
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "node_sample2_Y0"
-    output: "path_length2_output0"
-    name: "path_length2_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 7
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "node_sample2_Y0"
-    output: "node_sample2_output02"
-    name: "node_sample2_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 7
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "node_sample0_Y02"
-    input: "path_length0_Reshapecst"
-    output: "node_sample0_reshaped0"
-    name: "node_sample0_Reshape"
-    op_type: "Reshape"
-    attribute {
-      name: "allowzero"
-      i: 0
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "path_length0_Y0"
-    input: "path_length0_Reshapecst"
-    output: "path_length0_reshaped0"
-    name: "path_length0_Reshape"
-    op_type: "Reshape"
-    attribute {
-      name: "allowzero"
-      i: 0
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "path_length1_output0"
-    output: "path_length1_Y0"
-    name: "path_length1_LabelEncoder"
-    op_type: "LabelEncoder"
-    attribute {
-      name: "keys_int64s"
-      ints: 4
-      ints: 5
-      ints: 8
-      ints: 11
-      ints: 12
-      ints: 14
-      ints: 15
-      ints: 19
-      ints: 20
-      ints: 22
-      ints: 23
-      ints: 26
-      ints: 27
-      ints: 28
-      ints: 29
-      ints: 32
-      ints: 35
-      ints: 38
-      ints: 39
-      ints: 41
-      ints: 42
-      ints: 44
-      ints: 46
-      ints: 47
-      ints: 53
-      ints: 54
-      ints: 56
-      ints: 57
-      ints: 59
-      ints: 61
-      ints: 62
-      ints: 64
-      ints: 65
-      ints: 67
-      ints: 70
-      ints: 71
-      ints: 72
-      type: INTS
-    }
-    attribute {
-      name: "values_floats"
-      floats: 5.0
-      floats: 5.0
-      floats: 6.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 7.0
-      floats: 3.0
-      floats: 4.0
-      floats: 6.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 6.0
-      floats: 7.0
-      floats: 7.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 7.0
-      floats: 8.0
-      floats: 8.0
-      floats: 6.0
-      floats: 6.0
-      floats: 5.0
-      floats: 7.0
-      floats: 7.0
-      floats: 6.0
-      type: FLOATS
-    }
-    domain: "ai.onnx.ml"
-  }
-  node {
-    input: "node_sample1_output02"
-    output: "node_sample1_Y02"
-    name: "node_sample1_LabelEncoder"
-    op_type: "LabelEncoder"
-    attribute {
-      name: "keys_int64s"
-      ints: 4
-      ints: 5
-      ints: 8
-      ints: 11
-      ints: 12
-      ints: 14
-      ints: 15
-      ints: 19
-      ints: 20
-      ints: 22
-      ints: 23
-      ints: 26
-      ints: 27
-      ints: 28
-      ints: 29
-      ints: 32
-      ints: 35
-      ints: 38
-      ints: 39
-      ints: 41
-      ints: 42
-      ints: 44
-      ints: 46
-      ints: 47
-      ints: 53
-      ints: 54
-      ints: 56
-      ints: 57
-      ints: 59
-      ints: 61
-      ints: 62
-      ints: 64
-      ints: 65
-      ints: 67
-      ints: 70
-      ints: 71
-      ints: 72
-      type: INTS
-    }
-    attribute {
-      name: "values_floats"
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 14.0
-      floats: 7.0
-      floats: 1.0
-      floats: 6.0
-      floats: 1.0
-      floats: 1.0
-      floats: 5.0
-      floats: 24.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 2.0
-      floats: 9.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 2.0
-      floats: 1.0
-      floats: 2.0
-      floats: 1.0
-      floats: 2.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      type: FLOATS
-    }
-    domain: "ai.onnx.ml"
-  }
-  node {
-    input: "path_length2_output0"
-    output: "path_length2_Y0"
-    name: "path_length2_LabelEncoder"
-    op_type: "LabelEncoder"
-    attribute {
-      name: "keys_int64s"
-      ints: 7
-      ints: 8
-      ints: 10
-      ints: 11
-      ints: 14
-      ints: 15
-      ints: 17
-      ints: 18
-      ints: 20
-      ints: 21
-      ints: 26
-      ints: 27
-      ints: 29
-      ints: 30
-      ints: 33
-      ints: 34
-      ints: 36
-      ints: 37
-      ints: 39
-      ints: 40
-      ints: 46
-      ints: 47
-      ints: 48
-      ints: 50
-      ints: 51
-      ints: 53
-      ints: 54
-      ints: 55
-      ints: 59
-      ints: 63
-      ints: 64
-      ints: 65
-      ints: 66
-      ints: 67
-      ints: 73
-      ints: 74
-      ints: 76
-      ints: 77
-      ints: 80
-      ints: 81
-      ints: 82
-      ints: 83
-      ints: 87
-      ints: 89
-      ints: 90
-      ints: 91
-      ints: 95
-      ints: 96
-      ints: 97
-      ints: 100
-      ints: 101
-      ints: 103
-      ints: 104
-      type: INTS
-    }
-    attribute {
-      name: "values_floats"
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 6.0
-      floats: 6.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 6.0
-      floats: 6.0
-      floats: 8.0
-      floats: 8.0
-      floats: 7.0
-      floats: 7.0
-      floats: 7.0
-      floats: 6.0
-      floats: 6.0
-      floats: 4.0
-      floats: 5.0
-      floats: 8.0
-      floats: 8.0
-      floats: 7.0
-      floats: 6.0
-      floats: 4.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 7.0
-      floats: 5.0
-      floats: 7.0
-      floats: 8.0
-      floats: 8.0
-      floats: 6.0
-      floats: 8.0
-      floats: 8.0
-      floats: 7.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      floats: 8.0
-      type: FLOATS
-    }
-    domain: "ai.onnx.ml"
-  }
-  node {
-    input: "node_sample2_output02"
-    output: "node_sample2_Y02"
-    name: "node_sample2_LabelEncoder"
-    op_type: "LabelEncoder"
-    attribute {
-      name: "keys_int64s"
-      ints: 7
-      ints: 8
-      ints: 10
-      ints: 11
-      ints: 14
-      ints: 15
-      ints: 17
-      ints: 18
-      ints: 20
-      ints: 21
-      ints: 26
-      ints: 27
-      ints: 29
-      ints: 30
-      ints: 33
-      ints: 34
-      ints: 36
-      ints: 37
-      ints: 39
-      ints: 40
-      ints: 46
-      ints: 47
-      ints: 48
-      ints: 50
-      ints: 51
-      ints: 53
-      ints: 54
-      ints: 55
-      ints: 59
-      ints: 63
-      ints: 64
-      ints: 65
-      ints: 66
-      ints: 67
-      ints: 73
-      ints: 74
-      ints: 76
-      ints: 77
-      ints: 80
-      ints: 81
-      ints: 82
-      ints: 83
-      ints: 87
-      ints: 89
-      ints: 90
-      ints: 91
-      ints: 95
-      ints: 96
-      ints: 97
-      ints: 100
-      ints: 101
-      ints: 103
-      ints: 104
-      type: INTS
-    }
-    attribute {
-      name: "values_floats"
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 2.0
-      floats: 1.0
-      floats: 2.0
-      floats: 3.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 2.0
-      floats: 2.0
-      floats: 1.0
-      floats: 3.0
-      floats: 9.0
-      floats: 6.0
-      floats: 8.0
-      floats: 2.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 4.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 3.0
-      floats: 2.0
-      floats: 1.0
-      floats: 8.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 6.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      floats: 1.0
-      type: FLOATS
-    }
-    domain: "ai.onnx.ml"
-  }
-  node {
-    input: "node_sample0_reshaped0"
-    input: "dec_Powcst"
-    output: "eq2_0_C0"
-    name: "eq2_0_Equal"
-    op_type: "Equal"
-    domain: ""
-  }
-  node {
-    input: "node_sample0_reshaped0"
-    input: "dec_Powcst"
-    output: "plus2_0_C0"
-    name: "plus2_0_Greater"
-    op_type: "Greater"
-    domain: ""
-  }
-  node {
-    input: "path_length1_Y0"
-    input: "path_length0_Reshapecst"
-    output: "path_length1_reshaped0"
-    name: "path_length1_Reshape"
-    op_type: "Reshape"
-    attribute {
-      name: "allowzero"
-      i: 0
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "node_sample1_Y02"
-    input: "path_length0_Reshapecst"
-    output: "node_sample1_reshaped0"
-    name: "node_sample1_Reshape"
-    op_type: "Reshape"
-    attribute {
-      name: "allowzero"
-      i: 0
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "path_length2_Y0"
-    input: "path_length0_Reshapecst"
-    output: "path_length2_reshaped0"
-    name: "path_length2_Reshape"
-    op_type: "Reshape"
-    attribute {
-      name: "allowzero"
-      i: 0
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "node_sample2_Y02"
-    input: "path_length0_Reshapecst"
-    output: "node_sample2_reshaped0"
-    name: "node_sample2_Reshape"
-    op_type: "Reshape"
-    attribute {
-      name: "allowzero"
-      i: 0
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "plus2_0_C0"
-    output: "plus2_0_output0"
-    name: "plus2_0_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 1
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "eq2_0_C0"
-    output: "eq2_0_output0"
-    name: "eq2_0_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 1
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "node_sample1_reshaped0"
-    input: "dec_Powcst"
-    output: "plus2_1_C0"
-    name: "plus2_1_Greater"
-    op_type: "Greater"
-    domain: ""
-  }
-  node {
-    input: "node_sample1_reshaped0"
-    input: "dec_Powcst"
-    output: "eq2_1_C0"
-    name: "eq2_1_Equal"
-    op_type: "Equal"
-    domain: ""
-  }
-  node {
-    input: "node_sample2_reshaped0"
-    input: "dec_Powcst"
-    output: "eq2_2_C0"
-    name: "eq2_2_Equal"
-    op_type: "Equal"
-    domain: ""
-  }
-  node {
-    input: "node_sample2_reshaped0"
-    input: "dec_Powcst"
-    output: "plus2_2_C0"
-    name: "plus2_2_Greater"
-    op_type: "Greater"
-    domain: ""
-  }
-  node {
-    input: "plus2_0_output0"
-    input: "node_sample0_reshaped0"
-    output: "eqp2ps0_C0"
-    name: "eqp2ps0_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "plus2_1_C0"
-    output: "plus2_1_output0"
-    name: "plus2_1_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 1
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "eq2_1_C0"
-    output: "eq2_1_output0"
-    name: "eq2_1_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 1
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "eq2_2_C0"
-    output: "eq2_2_output0"
-    name: "eq2_2_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 1
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "plus2_2_C0"
-    output: "plus2_2_output0"
-    name: "plus2_2_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 1
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "eqp2ps0_C0"
-    input: "eqp2p_m1_0_Addcst"
-    output: "eqp2p_m1_0_C0"
-    name: "eqp2p_m1_0_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "eqp2ps0_C0"
-    input: "eqp2p_m1_0_Maxcst"
-    output: "eqp_ns0_max0"
-    name: "eqp_ns0_Max"
-    op_type: "Max"
-    domain: ""
-  }
-  node {
-    input: "plus2_1_output0"
-    input: "node_sample1_reshaped0"
-    output: "eqp2ps1_C0"
-    name: "eqp2ps1_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "plus2_2_output0"
-    input: "node_sample2_reshaped0"
-    output: "eqp2ps2_C0"
-    name: "eqp2ps2_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "eqp2p_m1_0_C0"
-    input: "eqp2p_m1_0_Maxcst1"
-    output: "eqp2p_m1_0_max02"
-    name: "eqp2p_m1_0_Max1"
-    op_type: "Max"
-    domain: ""
-  }
-  node {
-    input: "eqp2p_m1_0_C0"
-    input: "eqp2p_m1_0_Maxcst"
-    output: "eqp2p_m1_0_max0"
-    name: "eqp2p_m1_0_Max"
-    op_type: "Max"
-    domain: ""
-  }
-  node {
-    input: "eqp2ps1_C0"
-    input: "eqp2p_m1_0_Addcst"
-    output: "eqp2p_m1_1_C0"
-    name: "eqp2p_m1_1_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "eqp2ps1_C0"
-    input: "eqp2p_m1_0_Maxcst"
-    output: "eqp_ns1_max0"
-    name: "eqp_ns1_Max"
-    op_type: "Max"
-    domain: ""
-  }
-  node {
-    input: "eqp2ps2_C0"
-    input: "eqp2p_m1_0_Addcst"
-    output: "eqp2p_m1_2_C0"
-    name: "eqp2p_m1_2_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "eqp2ps2_C0"
-    input: "eqp2p_m1_0_Maxcst"
-    output: "eqp_ns2_max0"
-    name: "eqp_ns2_Max"
-    op_type: "Max"
-    domain: ""
-  }
-  node {
-    input: "eqp2p_m1_0_max0"
-    output: "eqp_log0_output0"
-    name: "eqp_log0_Log"
-    op_type: "Log"
-    domain: ""
-  }
-  node {
-    input: "eqp2p_m1_0_max02"
-    input: "eqp_ns0_max0"
-    output: "eqp_ns0_C01"
-    name: "eqp_ns0_Div"
-    op_type: "Div"
-    domain: ""
-  }
-  node {
-    input: "eqp2p_m1_1_C0"
-    input: "eqp2p_m1_0_Maxcst"
-    output: "eqp2p_m1_1_max0"
-    name: "eqp2p_m1_1_Max"
-    op_type: "Max"
-    domain: ""
-  }
-  node {
-    input: "eqp2p_m1_1_C0"
-    input: "eqp2p_m1_0_Maxcst1"
-    output: "eqp2p_m1_1_max02"
-    name: "eqp2p_m1_1_Max1"
-    op_type: "Max"
-    domain: ""
-  }
-  node {
-    input: "eqp2p_m1_2_C0"
-    input: "eqp2p_m1_0_Maxcst"
-    output: "eqp2p_m1_2_max0"
-    name: "eqp2p_m1_2_Max"
-    op_type: "Max"
-    domain: ""
-  }
-  node {
-    input: "eqp2p_m1_2_C0"
-    input: "eqp2p_m1_0_Maxcst1"
-    output: "eqp2p_m1_2_max02"
-    name: "eqp2p_m1_2_Max1"
-    op_type: "Max"
-    domain: ""
-  }
-  node {
-    input: "eqp_log0_output0"
-    input: "eqp_log0_Addcst"
-    output: "eqp_log0_C01"
-    name: "eqp_log0_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "eqp_ns0_C01"
-    input: "eqp_ns0_Mulcst"
-    output: "eqp_ns0_C0"
-    name: "eqp_ns0_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "eqp2p_m1_1_max02"
-    input: "eqp_ns1_max0"
-    output: "eqp_ns1_C01"
-    name: "eqp_ns1_Div"
-    op_type: "Div"
-    domain: ""
-  }
-  node {
-    input: "eqp2p_m1_1_max0"
-    output: "eqp_log1_output0"
-    name: "eqp_log1_Log"
-    op_type: "Log"
-    domain: ""
-  }
-  node {
-    input: "eqp2p_m1_2_max02"
-    input: "eqp_ns2_max0"
-    output: "eqp_ns2_C01"
-    name: "eqp_ns2_Div"
-    op_type: "Div"
-    domain: ""
-  }
-  node {
-    input: "eqp2p_m1_2_max0"
-    output: "eqp_log2_output0"
-    name: "eqp_log2_Log"
-    op_type: "Log"
-    domain: ""
-  }
-  node {
-    input: "eqp_log0_C01"
-    input: "dec_Powcst"
-    output: "eqp_log0_C0"
-    name: "eqp_log0_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "eqp_log1_output0"
-    input: "eqp_log0_Addcst"
-    output: "eqp_log1_C01"
-    name: "eqp_log1_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "eqp_ns1_C01"
-    input: "eqp_ns0_Mulcst"
-    output: "eqp_ns1_C0"
-    name: "eqp_ns1_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "eqp_ns2_C01"
-    input: "eqp_ns0_Mulcst"
-    output: "eqp_ns2_C0"
-    name: "eqp_ns2_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "eqp_log2_output0"
-    input: "eqp_log0_Addcst"
-    output: "eqp_log2_C01"
-    name: "eqp_log2_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "eqp_log0_C0"
-    input: "eqp_ns0_C0"
-    output: "avlog0_C01"
-    name: "avlog0_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "eqp_log1_C01"
-    input: "dec_Powcst"
-    output: "eqp_log1_C0"
-    name: "eqp_log1_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "eqp_log2_C01"
-    input: "dec_Powcst"
-    output: "eqp_log2_C0"
-    name: "eqp_log2_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "avlog0_C01"
-    input: "plus2_0_output0"
-    output: "avlog0_C0"
-    name: "avlog0_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "eqp_log1_C0"
-    input: "eqp_ns1_C0"
-    output: "avlog1_C01"
-    name: "avlog1_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "eqp_log2_C0"
-    input: "eqp_ns2_C0"
-    output: "avlog2_C01"
-    name: "avlog2_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "avlog1_C01"
-    input: "plus2_1_output0"
-    output: "avlog1_C0"
-    name: "avlog1_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "eq2_0_output0"
-    input: "avlog0_C0"
-    output: "avpl0_C0"
-    name: "avpl0_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "avlog2_C01"
-    input: "plus2_2_output0"
-    output: "avlog2_C0"
-    name: "avlog2_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "path_length0_reshaped0"
-    input: "avpl0_C0"
-    output: "depth0_C01"
-    name: "depth0_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "eq2_1_output0"
-    input: "avlog1_C0"
-    output: "avpl1_C0"
-    name: "avpl1_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "eq2_2_output0"
-    input: "avlog2_C0"
-    output: "avpl2_C0"
-    name: "avpl2_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "depth0_C01"
-    input: "eqp2p_m1_0_Addcst"
-    output: "depth0_C0"
-    name: "depth0_Add1"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "path_length1_reshaped0"
-    input: "avpl1_C0"
-    output: "depth1_C01"
-    name: "depth1_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "path_length2_reshaped0"
-    input: "avpl2_C0"
-    output: "depth2_C01"
-    name: "depth2_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "depth1_C01"
-    input: "eqp2p_m1_0_Addcst"
-    output: "depth1_C0"
-    name: "depth1_Add1"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "depth2_C01"
-    input: "eqp2p_m1_0_Addcst"
-    output: "depth2_C0"
-    name: "depth2_Add1"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "depth0_C0"
-    input: "depth1_C0"
-    input: "depth2_C0"
-    output: "dec_sum0"
-    name: "dec_Sum"
-    op_type: "Sum"
-    domain: ""
-  }
-  node {
-    input: "dec_sum0"
-    input: "dec_Divcst"
-    output: "dec_C0"
-    name: "dec_Div"
-    op_type: "Div"
-    domain: ""
-  }
-  node {
-    input: "dec_C0"
-    output: "dec_Y01"
-    name: "dec_Neg"
-    op_type: "Neg"
-    domain: ""
-  }
-  node {
-    input: "dec_Powcst"
-    input: "dec_Y01"
-    output: "dec_Z0"
-    name: "dec_Pow"
-    op_type: "Pow"
-    domain: ""
-  }
-  node {
-    input: "dec_Z0"
-    output: "dec_Y0"
-    name: "dec_Neg1"
-    op_type: "Neg"
-    domain: ""
-  }
-  node {
-    input: "dec_Y0"
-    input: "dec_Addcst"
-    output: "scores"
-    name: "dec_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  node {
-    input: "scores"
-    input: "eqp2p_m1_0_Maxcst1"
-    output: "predict_C01"
-    name: "predict_Less"
-    op_type: "Less"
-    domain: ""
-  }
-  node {
-    input: "predict_C01"
-    output: "predict_output0"
-    name: "predict_Cast"
-    op_type: "Cast"
-    attribute {
-      name: "to"
-      i: 7
-      type: INT
-    }
-    domain: ""
-  }
-  node {
-    input: "predict_output0"
-    input: "predict_Mulcst"
-    output: "predict_C0"
-    name: "predict_Mul"
-    op_type: "Mul"
-    domain: ""
-  }
-  node {
-    input: "predict_C0"
-    input: "predict_Addcst"
-    output: "label"
-    name: "predict_Add"
-    op_type: "Add"
-    domain: ""
-  }
-  name: "ONNX(IsolationForest)"
-  initializer {
-    dims: 1
-    data_type: 1
-    float_data: 2.0
-    name: "dec_Powcst"
-  }
-  initializer {
-    dims: 2
-    data_type: 7
-    int64_data: 0
-    int64_data: 1
-    name: "node_sample0_Gathercst"
-  }
-  initializer {
-    dims: 2
-    data_type: 7
-    int64_data: -1
-    int64_data: 1
-    name: "path_length0_Reshapecst"
-  }
-  initializer {
-    dims: 1
-    data_type: 1
-    float_data: -1.0
-    name: "eqp2p_m1_0_Addcst"
-  }
-  initializer {
-    dims: 1
-    data_type: 1
-    float_data: 1.0
-    name: "eqp2p_m1_0_Maxcst"
-  }
-  initializer {
-    dims: 1
-    data_type: 1
-    float_data: 0.5772156715393066
-    name: "eqp_log0_Addcst"
-  }
-  initializer {
-    dims: 1
-    data_type: 1
-    float_data: 0.0
-    name: "eqp2p_m1_0_Maxcst1"
-  }
-  initializer {
-    dims: 1
-    data_type: 1
-    float_data: -2.0
-    name: "eqp_ns0_Mulcst"
-  }
-  initializer {
-    dims: 1
-    dims: 1
-    data_type: 1
-    float_data: 25.094013214111328
-    name: "dec_Divcst"
-  }
-  initializer {
-    dims: 1
-    data_type: 1
-    float_data: 0.5
-    name: "dec_Addcst"
-  }
-  initializer {
-    dims: 1
-    data_type: 7
-    int64_data: -2
-    name: "predict_Mulcst"
-  }
-  initializer {
-    dims: 1
-    data_type: 7
-    int64_data: 1
-    name: "predict_Addcst"
-  }
-  input {
-    name: "X"
-    type {
-      tensor_type {
-        elem_type: 1
-        shape {
-          dim {
-          }
-          dim {
-            dim_value: 2
-          }
-        }
-      }
-    }
-  }
-  output {
-    name: "label"
-    type {
-      tensor_type {
-        elem_type: 7
-        shape {
-          dim {
-          }
-          dim {
-            dim_value: 1
-          }
-        }
-      }
-    }
-  }
-  output {
-    name: "scores"
-    type {
-      tensor_type {
-        elem_type: 1
-        shape {
-          dim {
-          }
-          dim {
-            dim_value: 1
-          }
-        }
-      }
-    }
-  }
-}
-opset_import {
-  domain: "ai.onnx.ml"
-  version: 2
-}
-opset_import {
-  domain: ""
-  version: 15
-}
-
-
-

The last line shows the opsets. Let’s extract it.

-
domains = onx.opset_import
-for dom in domains:
-    print("domain: %r, version: %r" % (dom.domain, dom.version))
-
-
-

Out:

-
domain: 'ai.onnx.ml', version: 2
-domain: '', version: 15
-
-
-

There are two opsets: one for standard operators, the other for machine learning operators.
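
A quick way to see how these entries are used is to group the nodes of the graph by their domain field: every node names a domain, and that domain selects which opset_import entry defines its behaviour. The following lines are a small sketch added for illustration; they assume onx is the model converted above and are not part of the original script.

from collections import Counter

# Count how many nodes rely on each opset domain of the converted model.
counts = Counter(node.domain for node in onx.graph.node)
for domain, n in sorted(counts.items()):
    print("domain %r is used by %d node(s)" % (domain, n))
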

-
-
-

ONNX and opset#

-

The converter can convert a model to an older opset than the default one, from 1 to the last available one.

-
def get_domain_opset(onx):
-    domains = onx.opset_import
-    res = [{'domain': dom.domain, 'version': dom.version}
-           for dom in domains]
-    return {d['domain']: d['version'] for d in res}
-
-
-for opset in range(6, onnx_opset_version() + 1):
-    try:
-        onx = to_onnx(model, X[:1].astype(numpy.float32),
-                      target_opset={'': opset, 'ai.onnx.ml': 2})
-    except RuntimeError as e:
-        print('target: %r error: %r' % (opset, e))
-        continue
-    nodes = len(onx.graph.node)
-    print('target: %r --> %s %d' % (opset, get_domain_opset(onx), nodes))
-
-
-

Out:

-
target: 6 --> {'': 6, 'ai.onnx.ml': 2} 91
-target: 7 --> {'': 7, 'ai.onnx.ml': 2} 91
-target: 8 --> {'': 8, 'ai.onnx.ml': 2} 91
-target: 9 --> {'': 9, 'ai.onnx.ml': 2} 91
-target: 10 --> {'ai.onnx.ml': 2, '': 10} 91
-target: 11 --> {'ai.onnx.ml': 2, '': 11} 91
-target: 12 --> {'': 12, 'ai.onnx.ml': 2} 91
-target: 13 --> {'': 13, 'ai.onnx.ml': 2} 91
-target: 14 --> {'': 14, 'ai.onnx.ml': 2} 91
-target: 15 --> {'ai.onnx.ml': 2, '': 15} 91
-D:\github\onnx\sklearn-onnx\skl2onnx\common\_topology.py:1405: UserWarning: Parameter target_opset 16 > 15 is higher than the the latest tested version.
-  warnings.warn(
-target: 16 error: RuntimeError("The model is using version 16 of domain '' not supported yet by this library. You need to specify target_opset={'': 15}.")
-D:\github\onnx\sklearn-onnx\skl2onnx\common\_topology.py:1405: UserWarning: Parameter target_opset 17 > 15 is higher than the the latest tested version.
-  warnings.warn(
-target: 17 error: RuntimeError("The model is using version 17 of domain '' not supported yet by this library. You need to specify target_opset={'': 15}.")
-
-
-

It shows that the model cannot be converted for an opset below 5. Operator Reshape changed in opset 5: a parameter became an input. The converter does not support opset < 5 because runtimes usually do not.
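
To make the Reshape change concrete, the following sketch (not part of the original script) builds a tiny graph with a Reshape node as defined since opset 5, where the target shape arrives as a second input instead of an attribute; this is the form the converter relies on.

import numpy
from onnx import TensorProto, helper, numpy_helper

# Since opset 5, the target shape is an input tensor, not an attribute.
shape_init = numpy_helper.from_array(
    numpy.array([-1, 1], dtype=numpy.int64), name="shape")
node = helper.make_node("Reshape", ["data", "shape"], ["reshaped"])
graph = helper.make_graph(
    [node], "reshape_since_opset5",
    [helper.make_tensor_value_info("data", TensorProto.FLOAT, [None, 2])],
    [helper.make_tensor_value_info("reshaped", TensorProto.FLOAT, [None, 1])],
    [shape_init])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 15)])
print(model.opset_import)
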

-
-
-

Other opsets#

-

The previous example changed the opset of the main domain '', but the other opset domain can be changed as well.

-
for opset in range(9, onnx_opset_version() + 1):
-    for opset_ml in range(1, 4):
-        tops = {'': opset, 'ai.onnx.ml': opset_ml}
-        try:
-            print("try target_opset:", tops)
-            onx = to_onnx(
-                model, X[:1].astype(numpy.float32), target_opset=tops)
-        except RuntimeError as e:
-            print('target: %r error: %r' % (opset, e))
-            continue
-        nodes = len(onx.graph.node)
-        print('target: %r --> %s %d' % (opset, get_domain_opset(onx), nodes))
-
-
-

Out:

-
try target_opset: {'': 9, 'ai.onnx.ml': 1}
-target: 9 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
-try target_opset: {'': 9, 'ai.onnx.ml': 2}
-target: 9 --> {'': 9, 'ai.onnx.ml': 2} 91
-try target_opset: {'': 9, 'ai.onnx.ml': 3}
-target: 9 --> {'': 9, 'ai.onnx.ml': 2} 91
-try target_opset: {'': 10, 'ai.onnx.ml': 1}
-target: 10 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
-try target_opset: {'': 10, 'ai.onnx.ml': 2}
-target: 10 --> {'ai.onnx.ml': 2, '': 10} 91
-try target_opset: {'': 10, 'ai.onnx.ml': 3}
-target: 10 --> {'ai.onnx.ml': 2, '': 10} 91
-try target_opset: {'': 11, 'ai.onnx.ml': 1}
-target: 11 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
-try target_opset: {'': 11, 'ai.onnx.ml': 2}
-target: 11 --> {'ai.onnx.ml': 2, '': 11} 91
-try target_opset: {'': 11, 'ai.onnx.ml': 3}
-target: 11 --> {'ai.onnx.ml': 2, '': 11} 91
-try target_opset: {'': 12, 'ai.onnx.ml': 1}
-target: 12 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
-try target_opset: {'': 12, 'ai.onnx.ml': 2}
-target: 12 --> {'': 12, 'ai.onnx.ml': 2} 91
-try target_opset: {'': 12, 'ai.onnx.ml': 3}
-target: 12 --> {'': 12, 'ai.onnx.ml': 2} 91
-try target_opset: {'': 13, 'ai.onnx.ml': 1}
-target: 13 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
-try target_opset: {'': 13, 'ai.onnx.ml': 2}
-target: 13 --> {'': 13, 'ai.onnx.ml': 2} 91
-try target_opset: {'': 13, 'ai.onnx.ml': 3}
-target: 13 --> {'': 13, 'ai.onnx.ml': 2} 91
-try target_opset: {'': 14, 'ai.onnx.ml': 1}
-target: 14 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
-try target_opset: {'': 14, 'ai.onnx.ml': 2}
-target: 14 --> {'': 14, 'ai.onnx.ml': 2} 91
-try target_opset: {'': 14, 'ai.onnx.ml': 3}
-target: 14 --> {'': 14, 'ai.onnx.ml': 2} 91
-try target_opset: {'': 15, 'ai.onnx.ml': 1}
-target: 15 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
-try target_opset: {'': 15, 'ai.onnx.ml': 2}
-target: 15 --> {'ai.onnx.ml': 2, '': 15} 91
-try target_opset: {'': 15, 'ai.onnx.ml': 3}
-target: 15 --> {'ai.onnx.ml': 2, '': 15} 91
-try target_opset: {'': 16, 'ai.onnx.ml': 1}
-D:\github\onnx\sklearn-onnx\skl2onnx\common\_topology.py:1405: UserWarning: Parameter target_opset 16 > 15 is higher than the the latest tested version.
-  warnings.warn(
-target: 16 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
-try target_opset: {'': 16, 'ai.onnx.ml': 2}
-target: 16 error: RuntimeError("The model is using version 16 of domain '' not supported yet by this library. You need to specify target_opset={'': 15}.")
-try target_opset: {'': 16, 'ai.onnx.ml': 3}
-D:\github\onnx\sklearn-onnx\skl2onnx\common\_topology.py:1405: UserWarning: Parameter target_opset 16 > 15 is higher than the the latest tested version.
-  warnings.warn(
-target: 16 error: RuntimeError("The model is using version 16 of domain '' not supported yet by this library. You need to specify target_opset={'': 15}.")
-try target_opset: {'': 17, 'ai.onnx.ml': 1}
-D:\github\onnx\sklearn-onnx\skl2onnx\common\_topology.py:1405: UserWarning: Parameter target_opset 17 > 15 is higher than the the latest tested version.
-  warnings.warn(
-target: 17 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
-try target_opset: {'': 17, 'ai.onnx.ml': 2}
-target: 17 error: RuntimeError("The model is using version 17 of domain '' not supported yet by this library. You need to specify target_opset={'': 15}.")
-try target_opset: {'': 17, 'ai.onnx.ml': 3}
-D:\github\onnx\sklearn-onnx\skl2onnx\common\_topology.py:1405: UserWarning: Parameter target_opset 17 > 15 is higher than the the latest tested version.
-  warnings.warn(
-target: 17 error: RuntimeError("The model is using version 17 of domain '' not supported yet by this library. You need to specify target_opset={'': 15}.")
-
-
-

Total running time of the script: ( 0 minutes 5.264 seconds)


Gallery generated by Sphinx-Gallery


What is the opset number?#

+

Every library is versioned. scikit-learn may change the implementation of a specific model; that happens for example with the SVC model, where the parameter break_ties was added in 0.22. ONNX also has a version, called the opset number. Operator ArgMin was added in opset 1 and changed in opsets 11, 12 and 13. Sometimes an operator is updated to extend the list of types it supports, sometimes a parameter is moved into the input list. The runtime used to deploy the model may not implement the newest version; in that case, the model must be converted using, usually, the most recent opset supported by the runtime. We call that opset the targeted opset. An ONNX graph contains a single opset per domain, and every node must follow the specification defined by the latest opset at or below the targeted opset.
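
The history of an operator can be queried directly from the onnx package. The snippet below is a sketch added for illustration (it does not appear in the original script): get_schema returns the schema in effect for a given target opset, and its since_version attribute tells in which opset that definition was introduced.

from onnx.defs import get_schema, onnx_opset_version

print("latest opset implemented by this onnx build:", onnx_opset_version())
for target in (1, 11, 12, 13, onnx_opset_version()):
    # Retrieve the ArgMin schema that applies when targeting this opset.
    schema = get_schema("ArgMin", target)
    print("target opset %2d -> ArgMin definition introduced in opset %d"
          % (target, schema.since_version))
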

+

This example considers an IsolationForest and digs into opsets.

+
+

Data#

+

A simple example.

+
from onnx.defs import onnx_opset_version
+from skl2onnx import to_onnx
+import numpy
+import matplotlib.pyplot as plt
+from sklearn.ensemble import IsolationForest
+from sklearn.datasets import make_blobs
+
+X, y = make_blobs(n_samples=100, n_features=2)
+
+model = IsolationForest(n_estimators=3)
+model.fit(X)
+labels = model.predict(X)
+
+fig, ax = plt.subplots(1, 1)
+for k in (-1, 1):
+    ax.plot(X[labels == k, 0], X[labels == k, 1], 'o', label="cl%d" % k)
+ax.set_title("Sample")
+
+
+Sample
+
+

ONNX#

+
onx = to_onnx(model, X[:1].astype(numpy.float32),
+              target_opset={'': 15, 'ai.onnx.ml': 2})
+print(onx)
+
+
+
ir_version: 8
+producer_name: "skl2onnx"
+producer_version: "1.14.0"
+domain: "ai.onnx"
+model_version: 0
+doc_string: ""
+graph {
+  node {
+    input: "X"
+    input: "node_sample0_Gathercst"
+    output: "node_sample0_output0"
+    name: "node_sample0_Gather"
+    op_type: "Gather"
+    attribute {
+      name: "axis"
+      i: 1
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "node_sample0_output0"
+    output: "node_sample0_Y0"
+    name: "node_sample0_TreeEnsembleRegressor"
+    op_type: "TreeEnsembleRegressor"
+    attribute {
+      name: "n_targets"
+      i: 1
+      type: INT
+    }
+    attribute {
+      name: "nodes_falsenodeids"
+      ints: 52
+      ints: 11
+      ints: 4
+      ints: 0
+      ints: 10
+      ints: 7
+      ints: 0
+      ints: 9
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 35
+      ints: 20
+      ints: 15
+      ints: 0
+      ints: 17
+      ints: 0
+      ints: 19
+      ints: 0
+      ints: 0
+      ints: 28
+      ints: 25
+      ints: 24
+      ints: 0
+      ints: 0
+      ints: 27
+      ints: 0
+      ints: 0
+      ints: 32
+      ints: 31
+      ints: 0
+      ints: 0
+      ints: 34
+      ints: 0
+      ints: 0
+      ints: 45
+      ints: 44
+      ints: 41
+      ints: 40
+      ints: 0
+      ints: 0
+      ints: 43
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 51
+      ints: 50
+      ints: 49
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 56
+      ints: 55
+      ints: 0
+      ints: 0
+      ints: 58
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_featureids"
+      ints: 1
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 1
+      ints: 1
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_hitrates"
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      type: FLOATS
+    }
+    attribute {
+      name: "nodes_missing_value_tracks_true"
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_modes"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      type: STRINGS
+    }
+    attribute {
+      name: "nodes_nodeids"
+      ints: 0
+      ints: 1
+      ints: 2
+      ints: 3
+      ints: 4
+      ints: 5
+      ints: 6
+      ints: 7
+      ints: 8
+      ints: 9
+      ints: 10
+      ints: 11
+      ints: 12
+      ints: 13
+      ints: 14
+      ints: 15
+      ints: 16
+      ints: 17
+      ints: 18
+      ints: 19
+      ints: 20
+      ints: 21
+      ints: 22
+      ints: 23
+      ints: 24
+      ints: 25
+      ints: 26
+      ints: 27
+      ints: 28
+      ints: 29
+      ints: 30
+      ints: 31
+      ints: 32
+      ints: 33
+      ints: 34
+      ints: 35
+      ints: 36
+      ints: 37
+      ints: 38
+      ints: 39
+      ints: 40
+      ints: 41
+      ints: 42
+      ints: 43
+      ints: 44
+      ints: 45
+      ints: 46
+      ints: 47
+      ints: 48
+      ints: 49
+      ints: 50
+      ints: 51
+      ints: 52
+      ints: 53
+      ints: 54
+      ints: 55
+      ints: 56
+      ints: 57
+      ints: 58
+      type: INTS
+    }
+    attribute {
+      name: "nodes_treeids"
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_truenodeids"
+      ints: 1
+      ints: 2
+      ints: 3
+      ints: 0
+      ints: 5
+      ints: 6
+      ints: 0
+      ints: 8
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 12
+      ints: 13
+      ints: 14
+      ints: 0
+      ints: 16
+      ints: 0
+      ints: 18
+      ints: 0
+      ints: 0
+      ints: 21
+      ints: 22
+      ints: 23
+      ints: 0
+      ints: 0
+      ints: 26
+      ints: 0
+      ints: 0
+      ints: 29
+      ints: 30
+      ints: 0
+      ints: 0
+      ints: 33
+      ints: 0
+      ints: 0
+      ints: 36
+      ints: 37
+      ints: 38
+      ints: 39
+      ints: 0
+      ints: 0
+      ints: 42
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 46
+      ints: 47
+      ints: 48
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 53
+      ints: 54
+      ints: 0
+      ints: 0
+      ints: 57
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_values"
+      floats: 9.529556274414062
+      floats: 0.06639961153268814
+      floats: -1.2243356704711914
+      floats: 0.0
+      floats: 0.1448548436164856
+      floats: -1.0675677061080933
+      floats: 0.0
+      floats: -0.4722660183906555
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      floats: 6.237240314483643
+      floats: -1.5116932392120361
+      floats: 0.8247517943382263
+      floats: 0.0
+      floats: -5.144781589508057
+      floats: 0.0
+      floats: 5.4886579513549805
+      floats: 0.0
+      floats: 0.0
+      floats: -0.7253645658493042
+      floats: 1.3656964302062988
+      floats: -0.8799896836280823
+      floats: 0.0
+      floats: 0.0
+      floats: -1.0190058946609497
+      floats: 0.0
+      floats: 0.0
+      floats: -0.49304017424583435
+      floats: -0.5850936770439148
+      floats: 0.0
+      floats: 0.0
+      floats: 1.2917989492416382
+      floats: 0.0
+      floats: 0.0
+      floats: 7.957119941711426
+      floats: 7.862874507904053
+      floats: 7.656042575836182
+      floats: 7.188352584838867
+      floats: 0.0
+      floats: 0.0
+      floats: 7.790811538696289
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      floats: 9.259439468383789
+      floats: 8.948318481445312
+      floats: -4.675846576690674
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      floats: -3.819464921951294
+      floats: -4.20181941986084
+      floats: 0.0
+      floats: 0.0
+      floats: -3.639735460281372
+      floats: 0.0
+      floats: 0.0
+      type: FLOATS
+    }
+    attribute {
+      name: "post_transform"
+      s: "NONE"
+      type: STRING
+    }
+    attribute {
+      name: "target_ids"
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "target_nodeids"
+      ints: 3
+      ints: 6
+      ints: 8
+      ints: 9
+      ints: 10
+      ints: 14
+      ints: 16
+      ints: 18
+      ints: 19
+      ints: 23
+      ints: 24
+      ints: 26
+      ints: 27
+      ints: 30
+      ints: 31
+      ints: 33
+      ints: 34
+      ints: 39
+      ints: 40
+      ints: 42
+      ints: 43
+      ints: 44
+      ints: 48
+      ints: 49
+      ints: 50
+      ints: 51
+      ints: 54
+      ints: 55
+      ints: 57
+      ints: 58
+      type: INTS
+    }
+    attribute {
+      name: "target_treeids"
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "target_weights"
+      floats: 3.0
+      floats: 6.0
+      floats: 8.0
+      floats: 9.0
+      floats: 10.0
+      floats: 14.0
+      floats: 16.0
+      floats: 18.0
+      floats: 19.0
+      floats: 23.0
+      floats: 24.0
+      floats: 26.0
+      floats: 27.0
+      floats: 30.0
+      floats: 31.0
+      floats: 33.0
+      floats: 34.0
+      floats: 39.0
+      floats: 40.0
+      floats: 42.0
+      floats: 43.0
+      floats: 44.0
+      floats: 48.0
+      floats: 49.0
+      floats: 50.0
+      floats: 51.0
+      floats: 54.0
+      floats: 55.0
+      floats: 57.0
+      floats: 58.0
+      type: FLOATS
+    }
+    domain: "ai.onnx.ml"
+  }
+  node {
+    input: "X"
+    input: "node_sample0_Gathercst"
+    output: "node_sample2_output0"
+    name: "node_sample2_Gather"
+    op_type: "Gather"
+    attribute {
+      name: "axis"
+      i: 1
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "X"
+    input: "node_sample0_Gathercst"
+    output: "node_sample1_output0"
+    name: "node_sample1_Gather"
+    op_type: "Gather"
+    attribute {
+      name: "axis"
+      i: 1
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "node_sample2_output0"
+    output: "node_sample2_Y0"
+    name: "node_sample2_TreeEnsembleRegressor"
+    op_type: "TreeEnsembleRegressor"
+    attribute {
+      name: "n_targets"
+      i: 1
+      type: INT
+    }
+    attribute {
+      name: "nodes_falsenodeids"
+      ints: 72
+      ints: 45
+      ints: 22
+      ints: 17
+      ints: 12
+      ints: 9
+      ints: 8
+      ints: 0
+      ints: 0
+      ints: 11
+      ints: 0
+      ints: 0
+      ints: 14
+      ints: 0
+      ints: 16
+      ints: 0
+      ints: 0
+      ints: 19
+      ints: 0
+      ints: 21
+      ints: 0
+      ints: 0
+      ints: 30
+      ints: 25
+      ints: 0
+      ints: 29
+      ints: 28
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 38
+      ints: 35
+      ints: 34
+      ints: 0
+      ints: 0
+      ints: 37
+      ints: 0
+      ints: 0
+      ints: 42
+      ints: 41
+      ints: 0
+      ints: 0
+      ints: 44
+      ints: 0
+      ints: 0
+      ints: 69
+      ints: 58
+      ints: 53
+      ints: 52
+      ints: 51
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 55
+      ints: 0
+      ints: 57
+      ints: 0
+      ints: 0
+      ints: 66
+      ints: 63
+      ints: 62
+      ints: 0
+      ints: 0
+      ints: 65
+      ints: 0
+      ints: 0
+      ints: 68
+      ints: 0
+      ints: 0
+      ints: 71
+      ints: 0
+      ints: 0
+      ints: 74
+      ints: 0
+      ints: 96
+      ints: 83
+      ints: 78
+      ints: 0
+      ints: 82
+      ints: 81
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 91
+      ints: 88
+      ints: 87
+      ints: 0
+      ints: 0
+      ints: 90
+      ints: 0
+      ints: 0
+      ints: 95
+      ints: 94
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 102
+      ints: 99
+      ints: 0
+      ints: 101
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_featureids"
+      ints: 1
+      ints: 1
+      ints: 1
+      ints: 1
+      ints: 1
+      ints: 1
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 1
+      ints: 1
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 1
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_hitrates"
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      type: FLOATS
+    }
+    attribute {
+      name: "nodes_missing_value_tracks_true"
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_modes"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      type: STRINGS
+    }
+    attribute {
+      name: "nodes_nodeids"
+      ints: 0
+      ints: 1
+      ints: 2
+      ints: 3
+      ints: 4
+      ints: 5
+      ints: 6
+      ints: 7
+      ints: 8
+      ints: 9
+      ints: 10
+      ints: 11
+      ints: 12
+      ints: 13
+      ints: 14
+      ints: 15
+      ints: 16
+      ints: 17
+      ints: 18
+      ints: 19
+      ints: 20
+      ints: 21
+      ints: 22
+      ints: 23
+      ints: 24
+      ints: 25
+      ints: 26
+      ints: 27
+      ints: 28
+      ints: 29
+      ints: 30
+      ints: 31
+      ints: 32
+      ints: 33
+      ints: 34
+      ints: 35
+      ints: 36
+      ints: 37
+      ints: 38
+      ints: 39
+      ints: 40
+      ints: 41
+      ints: 42
+      ints: 43
+      ints: 44
+      ints: 45
+      ints: 46
+      ints: 47
+      ints: 48
+      ints: 49
+      ints: 50
+      ints: 51
+      ints: 52
+      ints: 53
+      ints: 54
+      ints: 55
+      ints: 56
+      ints: 57
+      ints: 58
+      ints: 59
+      ints: 60
+      ints: 61
+      ints: 62
+      ints: 63
+      ints: 64
+      ints: 65
+      ints: 66
+      ints: 67
+      ints: 68
+      ints: 69
+      ints: 70
+      ints: 71
+      ints: 72
+      ints: 73
+      ints: 74
+      ints: 75
+      ints: 76
+      ints: 77
+      ints: 78
+      ints: 79
+      ints: 80
+      ints: 81
+      ints: 82
+      ints: 83
+      ints: 84
+      ints: 85
+      ints: 86
+      ints: 87
+      ints: 88
+      ints: 89
+      ints: 90
+      ints: 91
+      ints: 92
+      ints: 93
+      ints: 94
+      ints: 95
+      ints: 96
+      ints: 97
+      ints: 98
+      ints: 99
+      ints: 100
+      ints: 101
+      ints: 102
+      type: INTS
+    }
+    attribute {
+      name: "nodes_treeids"
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_truenodeids"
+      ints: 1
+      ints: 2
+      ints: 3
+      ints: 4
+      ints: 5
+      ints: 6
+      ints: 7
+      ints: 0
+      ints: 0
+      ints: 10
+      ints: 0
+      ints: 0
+      ints: 13
+      ints: 0
+      ints: 15
+      ints: 0
+      ints: 0
+      ints: 18
+      ints: 0
+      ints: 20
+      ints: 0
+      ints: 0
+      ints: 23
+      ints: 24
+      ints: 0
+      ints: 26
+      ints: 27
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 31
+      ints: 32
+      ints: 33
+      ints: 0
+      ints: 0
+      ints: 36
+      ints: 0
+      ints: 0
+      ints: 39
+      ints: 40
+      ints: 0
+      ints: 0
+      ints: 43
+      ints: 0
+      ints: 0
+      ints: 46
+      ints: 47
+      ints: 48
+      ints: 49
+      ints: 50
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 54
+      ints: 0
+      ints: 56
+      ints: 0
+      ints: 0
+      ints: 59
+      ints: 60
+      ints: 61
+      ints: 0
+      ints: 0
+      ints: 64
+      ints: 0
+      ints: 0
+      ints: 67
+      ints: 0
+      ints: 0
+      ints: 70
+      ints: 0
+      ints: 0
+      ints: 73
+      ints: 0
+      ints: 75
+      ints: 76
+      ints: 77
+      ints: 0
+      ints: 79
+      ints: 80
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 84
+      ints: 85
+      ints: 86
+      ints: 0
+      ints: 0
+      ints: 89
+      ints: 0
+      ints: 0
+      ints: 92
+      ints: 93
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 97
+      ints: 98
+      ints: 0
+      ints: 100
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_values"
+      floats: 5.84586238861084
+      floats: 3.3479835987091064
+      floats: 1.3570774793624878
+      floats: 1.2792494297027588
+      floats: 0.8155440092086792
+      floats: -0.14481677114963531
+      floats: -0.6906545162200928
+      floats: 0.0
+      floats: 0.0
+      floats: -0.7135818004608154
+      floats: 0.0
+      floats: 0.0
+      floats: 0.9292002320289612
+      floats: 0.0
+      floats: 1.0187170505523682
+      floats: 0.0
+      floats: 0.0
+      floats: 1.3404710292816162
+      floats: 0.0
+      floats: 1.343726634979248
+      floats: 0.0
+      floats: 0.0
+      floats: 1.8512831926345825
+      floats: -0.5900801420211792
+      floats: 0.0
+      floats: -0.019533302634954453
+      floats: 1.626607060432434
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      floats: -0.19708473980426788
+      floats: 2.966790199279785
+      floats: -1.8903709650039673
+      floats: 0.0
+      floats: 0.0
+      floats: 3.3281314373016357
+      floats: 0.0
+      floats: 0.0
+      floats: 2.2592618465423584
+      floats: 2.113555669784546
+      floats: 0.0
+      floats: 0.0
+      floats: 0.04144952446222305
+      floats: 0.0
+      floats: 0.0
+      floats: -0.7720679640769958
+      floats: -2.6698925495147705
+      floats: -3.359083414077759
+      floats: -3.819303274154663
+      floats: -3.8522212505340576
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      floats: 3.8754918575286865
+      floats: 0.0
+      floats: -2.76548433303833
+      floats: 0.0
+      floats: 0.0
+      floats: -1.5094605684280396
+      floats: 4.876166343688965
+      floats: -2.066214084625244
+      floats: 0.0
+      floats: 0.0
+      floats: 5.31315803527832
+      floats: 0.0
+      floats: 0.0
+      floats: 5.141677379608154
+      floats: 0.0
+      floats: 0.0
+      floats: 4.081350326538086
+      floats: 0.0
+      floats: 0.0
+      floats: -6.860238075256348
+      floats: 0.0
+      floats: 9.618461608886719
+      floats: 7.555417537689209
+      floats: 6.563416481018066
+      floats: 0.0
+      floats: 7.313610076904297
+      floats: -5.703146934509277
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      floats: -4.049132823944092
+      floats: -4.672673225402832
+      floats: -5.863733291625977
+      floats: 0.0
+      floats: 0.0
+      floats: -4.3013410568237305
+      floats: 0.0
+      floats: 0.0
+      floats: -2.4979329109191895
+      floats: -3.212557315826416
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      floats: 9.996405601501465
+      floats: 9.734021186828613
+      floats: 0.0
+      floats: 9.765551567077637
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      type: FLOATS
+    }
+    attribute {
+      name: "post_transform"
+      s: "NONE"
+      type: STRING
+    }
+    attribute {
+      name: "target_ids"
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "target_nodeids"
+      ints: 7
+      ints: 8
+      ints: 10
+      ints: 11
+      ints: 13
+      ints: 15
+      ints: 16
+      ints: 18
+      ints: 20
+      ints: 21
+      ints: 24
+      ints: 27
+      ints: 28
+      ints: 29
+      ints: 33
+      ints: 34
+      ints: 36
+      ints: 37
+      ints: 40
+      ints: 41
+      ints: 43
+      ints: 44
+      ints: 50
+      ints: 51
+      ints: 52
+      ints: 54
+      ints: 56
+      ints: 57
+      ints: 61
+      ints: 62
+      ints: 64
+      ints: 65
+      ints: 67
+      ints: 68
+      ints: 70
+      ints: 71
+      ints: 73
+      ints: 77
+      ints: 80
+      ints: 81
+      ints: 82
+      ints: 86
+      ints: 87
+      ints: 89
+      ints: 90
+      ints: 93
+      ints: 94
+      ints: 95
+      ints: 98
+      ints: 100
+      ints: 101
+      ints: 102
+      type: INTS
+    }
+    attribute {
+      name: "target_treeids"
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "target_weights"
+      floats: 7.0
+      floats: 8.0
+      floats: 10.0
+      floats: 11.0
+      floats: 13.0
+      floats: 15.0
+      floats: 16.0
+      floats: 18.0
+      floats: 20.0
+      floats: 21.0
+      floats: 24.0
+      floats: 27.0
+      floats: 28.0
+      floats: 29.0
+      floats: 33.0
+      floats: 34.0
+      floats: 36.0
+      floats: 37.0
+      floats: 40.0
+      floats: 41.0
+      floats: 43.0
+      floats: 44.0
+      floats: 50.0
+      floats: 51.0
+      floats: 52.0
+      floats: 54.0
+      floats: 56.0
+      floats: 57.0
+      floats: 61.0
+      floats: 62.0
+      floats: 64.0
+      floats: 65.0
+      floats: 67.0
+      floats: 68.0
+      floats: 70.0
+      floats: 71.0
+      floats: 73.0
+      floats: 77.0
+      floats: 80.0
+      floats: 81.0
+      floats: 82.0
+      floats: 86.0
+      floats: 87.0
+      floats: 89.0
+      floats: 90.0
+      floats: 93.0
+      floats: 94.0
+      floats: 95.0
+      floats: 98.0
+      floats: 100.0
+      floats: 101.0
+      floats: 102.0
+      type: FLOATS
+    }
+    domain: "ai.onnx.ml"
+  }
+  node {
+    input: "node_sample0_Y0"
+    output: "node_sample0_output02"
+    name: "node_sample0_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 7
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "node_sample1_output0"
+    output: "node_sample1_Y0"
+    name: "node_sample1_TreeEnsembleRegressor"
+    op_type: "TreeEnsembleRegressor"
+    attribute {
+      name: "n_targets"
+      i: 1
+      type: INT
+    }
+    attribute {
+      name: "nodes_falsenodeids"
+      ints: 56
+      ints: 33
+      ints: 14
+      ints: 5
+      ints: 0
+      ints: 13
+      ints: 10
+      ints: 9
+      ints: 0
+      ints: 0
+      ints: 12
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 20
+      ints: 17
+      ints: 0
+      ints: 19
+      ints: 0
+      ints: 0
+      ints: 26
+      ints: 23
+      ints: 0
+      ints: 25
+      ints: 0
+      ints: 0
+      ints: 30
+      ints: 29
+      ints: 0
+      ints: 0
+      ints: 32
+      ints: 0
+      ints: 0
+      ints: 35
+      ints: 0
+      ints: 43
+      ints: 42
+      ints: 41
+      ints: 40
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 51
+      ints: 48
+      ints: 47
+      ints: 0
+      ints: 0
+      ints: 50
+      ints: 0
+      ints: 0
+      ints: 55
+      ints: 54
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 70
+      ints: 65
+      ints: 62
+      ints: 61
+      ints: 0
+      ints: 0
+      ints: 64
+      ints: 0
+      ints: 0
+      ints: 67
+      ints: 0
+      ints: 69
+      ints: 0
+      ints: 0
+      ints: 72
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_featureids"
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 1
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 1
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_hitrates"
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      type: FLOATS
+    }
+    attribute {
+      name: "nodes_missing_value_tracks_true"
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_modes"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      strings: "BRANCH_LEQ"
+      strings: "LEAF"
+      strings: "LEAF"
+      type: STRINGS
+    }
+    attribute {
+      name: "nodes_nodeids"
+      ints: 0
+      ints: 1
+      ints: 2
+      ints: 3
+      ints: 4
+      ints: 5
+      ints: 6
+      ints: 7
+      ints: 8
+      ints: 9
+      ints: 10
+      ints: 11
+      ints: 12
+      ints: 13
+      ints: 14
+      ints: 15
+      ints: 16
+      ints: 17
+      ints: 18
+      ints: 19
+      ints: 20
+      ints: 21
+      ints: 22
+      ints: 23
+      ints: 24
+      ints: 25
+      ints: 26
+      ints: 27
+      ints: 28
+      ints: 29
+      ints: 30
+      ints: 31
+      ints: 32
+      ints: 33
+      ints: 34
+      ints: 35
+      ints: 36
+      ints: 37
+      ints: 38
+      ints: 39
+      ints: 40
+      ints: 41
+      ints: 42
+      ints: 43
+      ints: 44
+      ints: 45
+      ints: 46
+      ints: 47
+      ints: 48
+      ints: 49
+      ints: 50
+      ints: 51
+      ints: 52
+      ints: 53
+      ints: 54
+      ints: 55
+      ints: 56
+      ints: 57
+      ints: 58
+      ints: 59
+      ints: 60
+      ints: 61
+      ints: 62
+      ints: 63
+      ints: 64
+      ints: 65
+      ints: 66
+      ints: 67
+      ints: 68
+      ints: 69
+      ints: 70
+      ints: 71
+      ints: 72
+      type: INTS
+    }
+    attribute {
+      name: "nodes_treeids"
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_truenodeids"
+      ints: 1
+      ints: 2
+      ints: 3
+      ints: 4
+      ints: 0
+      ints: 6
+      ints: 7
+      ints: 8
+      ints: 0
+      ints: 0
+      ints: 11
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 15
+      ints: 16
+      ints: 0
+      ints: 18
+      ints: 0
+      ints: 0
+      ints: 21
+      ints: 22
+      ints: 0
+      ints: 24
+      ints: 0
+      ints: 0
+      ints: 27
+      ints: 28
+      ints: 0
+      ints: 0
+      ints: 31
+      ints: 0
+      ints: 0
+      ints: 34
+      ints: 0
+      ints: 36
+      ints: 37
+      ints: 38
+      ints: 39
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 44
+      ints: 45
+      ints: 46
+      ints: 0
+      ints: 0
+      ints: 49
+      ints: 0
+      ints: 0
+      ints: 52
+      ints: 53
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 57
+      ints: 58
+      ints: 59
+      ints: 60
+      ints: 0
+      ints: 0
+      ints: 63
+      ints: 0
+      ints: 0
+      ints: 66
+      ints: 0
+      ints: 68
+      ints: 0
+      ints: 0
+      ints: 71
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "nodes_values"
+      floats: 0.2054523527622223
+      floats: 5.720078945159912
+      floats: -2.2732791900634766
+      floats: -4.630293369293213
+      floats: 0.0
+      floats: 5.411093711853027
+      floats: -3.3866586685180664
+      floats: 4.608852386474609
+      floats: 0.0
+      floats: 0.0
+      floats: 3.734891891479492
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      floats: -0.10917384922504425
+      floats: -1.2374401092529297
+      floats: 0.0
+      floats: -0.827874481678009
+      floats: 0.0
+      floats: 0.0
+      floats: -2.071647882461548
+      floats: -2.102121114730835
+      floats: 0.0
+      floats: -2.0763232707977295
+      floats: 0.0
+      floats: 0.0
+      floats: 0.6638698577880859
+      floats: -0.8181525468826294
+      floats: 0.0
+      floats: 0.0
+      floats: 0.10006757825613022
+      floats: 0.0
+      floats: 0.0
+      floats: -6.082978248596191
+      floats: 0.0
+      floats: -4.585964202880859
+      floats: 9.586073875427246
+      floats: -4.862358570098877
+      floats: 7.337919235229492
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      floats: -3.677865743637085
+      floats: 7.8631792068481445
+      floats: -3.906221628189087
+      floats: 0.0
+      floats: 0.0
+      floats: -4.509670257568359
+      floats: 0.0
+      floats: 0.0
+      floats: -2.576967477798462
+      floats: 8.915074348449707
+      floats: 0.0
+      floats: 0.0
+      floats: 0.0
+      floats: 2.103733777999878
+      floats: 0.5658510327339172
+      floats: 1.2207704782485962
+      floats: 0.022815637290477753
+      floats: 0.0
+      floats: 0.0
+      floats: 1.3473073244094849
+      floats: 0.0
+      floats: 0.0
+      floats: 0.9728590846061707
+      floats: 0.0
+      floats: 0.4070947468280792
+      floats: 0.0
+      floats: 0.0
+      floats: 2.6099328994750977
+      floats: 0.0
+      floats: 0.0
+      type: FLOATS
+    }
+    attribute {
+      name: "post_transform"
+      s: "NONE"
+      type: STRING
+    }
+    attribute {
+      name: "target_ids"
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "target_nodeids"
+      ints: 4
+      ints: 8
+      ints: 9
+      ints: 11
+      ints: 12
+      ints: 13
+      ints: 16
+      ints: 18
+      ints: 19
+      ints: 22
+      ints: 24
+      ints: 25
+      ints: 28
+      ints: 29
+      ints: 31
+      ints: 32
+      ints: 34
+      ints: 39
+      ints: 40
+      ints: 41
+      ints: 42
+      ints: 46
+      ints: 47
+      ints: 49
+      ints: 50
+      ints: 53
+      ints: 54
+      ints: 55
+      ints: 60
+      ints: 61
+      ints: 63
+      ints: 64
+      ints: 66
+      ints: 68
+      ints: 69
+      ints: 71
+      ints: 72
+      type: INTS
+    }
+    attribute {
+      name: "target_treeids"
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      ints: 0
+      type: INTS
+    }
+    attribute {
+      name: "target_weights"
+      floats: 4.0
+      floats: 8.0
+      floats: 9.0
+      floats: 11.0
+      floats: 12.0
+      floats: 13.0
+      floats: 16.0
+      floats: 18.0
+      floats: 19.0
+      floats: 22.0
+      floats: 24.0
+      floats: 25.0
+      floats: 28.0
+      floats: 29.0
+      floats: 31.0
+      floats: 32.0
+      floats: 34.0
+      floats: 39.0
+      floats: 40.0
+      floats: 41.0
+      floats: 42.0
+      floats: 46.0
+      floats: 47.0
+      floats: 49.0
+      floats: 50.0
+      floats: 53.0
+      floats: 54.0
+      floats: 55.0
+      floats: 60.0
+      floats: 61.0
+      floats: 63.0
+      floats: 64.0
+      floats: 66.0
+      floats: 68.0
+      floats: 69.0
+      floats: 71.0
+      floats: 72.0
+      type: FLOATS
+    }
+    domain: "ai.onnx.ml"
+  }
+  node {
+    input: "node_sample0_Y0"
+    output: "path_length0_output0"
+    name: "path_length0_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 7
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "node_sample0_output02"
+    output: "node_sample0_Y02"
+    name: "node_sample0_LabelEncoder"
+    op_type: "LabelEncoder"
+    attribute {
+      name: "keys_int64s"
+      ints: 3
+      ints: 6
+      ints: 8
+      ints: 9
+      ints: 10
+      ints: 14
+      ints: 16
+      ints: 18
+      ints: 19
+      ints: 23
+      ints: 24
+      ints: 26
+      ints: 27
+      ints: 30
+      ints: 31
+      ints: 33
+      ints: 34
+      ints: 39
+      ints: 40
+      ints: 42
+      ints: 43
+      ints: 44
+      ints: 48
+      ints: 49
+      ints: 50
+      ints: 51
+      ints: 54
+      ints: 55
+      ints: 57
+      ints: 58
+      type: INTS
+    }
+    attribute {
+      name: "values_floats"
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 27.0
+      floats: 2.0
+      floats: 4.0
+      floats: 1.0
+      floats: 3.0
+      floats: 1.0
+      floats: 1.0
+      floats: 3.0
+      floats: 8.0
+      floats: 10.0
+      floats: 5.0
+      floats: 3.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 5.0
+      floats: 11.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      type: FLOATS
+    }
+    domain: "ai.onnx.ml"
+  }
+  node {
+    input: "node_sample2_Y0"
+    output: "path_length2_output0"
+    name: "path_length2_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 7
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "node_sample1_Y0"
+    output: "node_sample1_output02"
+    name: "node_sample1_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 7
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "node_sample2_Y0"
+    output: "node_sample2_output02"
+    name: "node_sample2_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 7
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "node_sample1_Y0"
+    output: "path_length1_output0"
+    name: "path_length1_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 7
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "path_length0_output0"
+    output: "path_length0_Y0"
+    name: "path_length0_LabelEncoder"
+    op_type: "LabelEncoder"
+    attribute {
+      name: "keys_int64s"
+      ints: 3
+      ints: 6
+      ints: 8
+      ints: 9
+      ints: 10
+      ints: 14
+      ints: 16
+      ints: 18
+      ints: 19
+      ints: 23
+      ints: 24
+      ints: 26
+      ints: 27
+      ints: 30
+      ints: 31
+      ints: 33
+      ints: 34
+      ints: 39
+      ints: 40
+      ints: 42
+      ints: 43
+      ints: 44
+      ints: 48
+      ints: 49
+      ints: 50
+      ints: 51
+      ints: 54
+      ints: 55
+      ints: 57
+      ints: 58
+      type: INTS
+    }
+    attribute {
+      name: "values_floats"
+      floats: 4.0
+      floats: 6.0
+      floats: 7.0
+      floats: 7.0
+      floats: 5.0
+      floats: 6.0
+      floats: 7.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 6.0
+      floats: 8.0
+      floats: 8.0
+      floats: 7.0
+      floats: 6.0
+      floats: 4.0
+      floats: 4.0
+      floats: 4.0
+      floats: 4.0
+      type: FLOATS
+    }
+    domain: "ai.onnx.ml"
+  }
+  node {
+    input: "node_sample0_Y02"
+    input: "path_length0_Reshapecst"
+    output: "node_sample0_reshaped0"
+    name: "node_sample0_Reshape"
+    op_type: "Reshape"
+    attribute {
+      name: "allowzero"
+      i: 0
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "path_length1_output0"
+    output: "path_length1_Y0"
+    name: "path_length1_LabelEncoder"
+    op_type: "LabelEncoder"
+    attribute {
+      name: "keys_int64s"
+      ints: 4
+      ints: 8
+      ints: 9
+      ints: 11
+      ints: 12
+      ints: 13
+      ints: 16
+      ints: 18
+      ints: 19
+      ints: 22
+      ints: 24
+      ints: 25
+      ints: 28
+      ints: 29
+      ints: 31
+      ints: 32
+      ints: 34
+      ints: 39
+      ints: 40
+      ints: 41
+      ints: 42
+      ints: 46
+      ints: 47
+      ints: 49
+      ints: 50
+      ints: 53
+      ints: 54
+      ints: 55
+      ints: 60
+      ints: 61
+      ints: 63
+      ints: 64
+      ints: 66
+      ints: 68
+      ints: 69
+      ints: 71
+      ints: 72
+      type: INTS
+    }
+    attribute {
+      name: "values_floats"
+      floats: 5.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 6.0
+      floats: 6.0
+      floats: 7.0
+      floats: 7.0
+      floats: 7.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 4.0
+      floats: 8.0
+      floats: 8.0
+      floats: 7.0
+      floats: 6.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 7.0
+      floats: 6.0
+      floats: 6.0
+      floats: 6.0
+      floats: 6.0
+      floats: 5.0
+      floats: 6.0
+      floats: 6.0
+      floats: 4.0
+      floats: 4.0
+      type: FLOATS
+    }
+    domain: "ai.onnx.ml"
+  }
+  node {
+    input: "node_sample2_output02"
+    output: "node_sample2_Y02"
+    name: "node_sample2_LabelEncoder"
+    op_type: "LabelEncoder"
+    attribute {
+      name: "keys_int64s"
+      ints: 7
+      ints: 8
+      ints: 10
+      ints: 11
+      ints: 13
+      ints: 15
+      ints: 16
+      ints: 18
+      ints: 20
+      ints: 21
+      ints: 24
+      ints: 27
+      ints: 28
+      ints: 29
+      ints: 33
+      ints: 34
+      ints: 36
+      ints: 37
+      ints: 40
+      ints: 41
+      ints: 43
+      ints: 44
+      ints: 50
+      ints: 51
+      ints: 52
+      ints: 54
+      ints: 56
+      ints: 57
+      ints: 61
+      ints: 62
+      ints: 64
+      ints: 65
+      ints: 67
+      ints: 68
+      ints: 70
+      ints: 71
+      ints: 73
+      ints: 77
+      ints: 80
+      ints: 81
+      ints: 82
+      ints: 86
+      ints: 87
+      ints: 89
+      ints: 90
+      ints: 93
+      ints: 94
+      ints: 95
+      ints: 98
+      ints: 100
+      ints: 101
+      ints: 102
+      type: INTS
+    }
+    attribute {
+      name: "values_floats"
+      floats: 1.0
+      floats: 2.0
+      floats: 4.0
+      floats: 4.0
+      floats: 1.0
+      floats: 2.0
+      floats: 3.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 2.0
+      floats: 2.0
+      floats: 1.0
+      floats: 3.0
+      floats: 3.0
+      floats: 1.0
+      floats: 1.0
+      floats: 2.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 2.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 4.0
+      floats: 1.0
+      floats: 2.0
+      floats: 7.0
+      floats: 3.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 4.0
+      floats: 1.0
+      floats: 1.0
+      floats: 9.0
+      floats: 1.0
+      floats: 2.0
+      floats: 7.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      type: FLOATS
+    }
+    domain: "ai.onnx.ml"
+  }
+  node {
+    input: "path_length2_output0"
+    output: "path_length2_Y0"
+    name: "path_length2_LabelEncoder"
+    op_type: "LabelEncoder"
+    attribute {
+      name: "keys_int64s"
+      ints: 7
+      ints: 8
+      ints: 10
+      ints: 11
+      ints: 13
+      ints: 15
+      ints: 16
+      ints: 18
+      ints: 20
+      ints: 21
+      ints: 24
+      ints: 27
+      ints: 28
+      ints: 29
+      ints: 33
+      ints: 34
+      ints: 36
+      ints: 37
+      ints: 40
+      ints: 41
+      ints: 43
+      ints: 44
+      ints: 50
+      ints: 51
+      ints: 52
+      ints: 54
+      ints: 56
+      ints: 57
+      ints: 61
+      ints: 62
+      ints: 64
+      ints: 65
+      ints: 67
+      ints: 68
+      ints: 70
+      ints: 71
+      ints: 73
+      ints: 77
+      ints: 80
+      ints: 81
+      ints: 82
+      ints: 86
+      ints: 87
+      ints: 89
+      ints: 90
+      ints: 93
+      ints: 94
+      ints: 95
+      ints: 98
+      ints: 100
+      ints: 101
+      ints: 102
+      type: INTS
+    }
+    attribute {
+      name: "values_floats"
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 7.0
+      floats: 8.0
+      floats: 8.0
+      floats: 6.0
+      floats: 7.0
+      floats: 7.0
+      floats: 6.0
+      floats: 8.0
+      floats: 8.0
+      floats: 7.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 7.0
+      floats: 7.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 7.0
+      floats: 7.0
+      floats: 5.0
+      floats: 5.0
+      floats: 3.0
+      floats: 6.0
+      floats: 8.0
+      floats: 8.0
+      floats: 7.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 8.0
+      floats: 7.0
+      floats: 6.0
+      floats: 7.0
+      floats: 7.0
+      floats: 5.0
+      type: FLOATS
+    }
+    domain: "ai.onnx.ml"
+  }
+  node {
+    input: "node_sample1_output02"
+    output: "node_sample1_Y02"
+    name: "node_sample1_LabelEncoder"
+    op_type: "LabelEncoder"
+    attribute {
+      name: "keys_int64s"
+      ints: 4
+      ints: 8
+      ints: 9
+      ints: 11
+      ints: 12
+      ints: 13
+      ints: 16
+      ints: 18
+      ints: 19
+      ints: 22
+      ints: 24
+      ints: 25
+      ints: 28
+      ints: 29
+      ints: 31
+      ints: 32
+      ints: 34
+      ints: 39
+      ints: 40
+      ints: 41
+      ints: 42
+      ints: 46
+      ints: 47
+      ints: 49
+      ints: 50
+      ints: 53
+      ints: 54
+      ints: 55
+      ints: 60
+      ints: 61
+      ints: 63
+      ints: 64
+      ints: 66
+      ints: 68
+      ints: 69
+      ints: 71
+      ints: 72
+      type: INTS
+    }
+    attribute {
+      name: "values_floats"
+      floats: 1.0
+      floats: 4.0
+      floats: 1.0
+      floats: 1.0
+      floats: 7.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 2.0
+      floats: 32.0
+      floats: 1.0
+      floats: 1.0
+      floats: 3.0
+      floats: 9.0
+      floats: 1.0
+      floats: 1.0
+      floats: 2.0
+      floats: 2.0
+      floats: 1.0
+      floats: 7.0
+      floats: 5.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      floats: 1.0
+      type: FLOATS
+    }
+    domain: "ai.onnx.ml"
+  }
+  node {
+    input: "path_length0_Y0"
+    input: "path_length0_Reshapecst"
+    output: "path_length0_reshaped0"
+    name: "path_length0_Reshape"
+    op_type: "Reshape"
+    attribute {
+      name: "allowzero"
+      i: 0
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "node_sample0_reshaped0"
+    input: "dec_Powcst"
+    output: "eq2_0_C0"
+    name: "eq2_0_Equal"
+    op_type: "Equal"
+    domain: ""
+  }
+  node {
+    input: "node_sample1_Y02"
+    input: "path_length0_Reshapecst"
+    output: "node_sample1_reshaped0"
+    name: "node_sample1_Reshape"
+    op_type: "Reshape"
+    attribute {
+      name: "allowzero"
+      i: 0
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "path_length2_Y0"
+    input: "path_length0_Reshapecst"
+    output: "path_length2_reshaped0"
+    name: "path_length2_Reshape"
+    op_type: "Reshape"
+    attribute {
+      name: "allowzero"
+      i: 0
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "node_sample2_Y02"
+    input: "path_length0_Reshapecst"
+    output: "node_sample2_reshaped0"
+    name: "node_sample2_Reshape"
+    op_type: "Reshape"
+    attribute {
+      name: "allowzero"
+      i: 0
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "node_sample0_reshaped0"
+    input: "dec_Powcst"
+    output: "plus2_0_C0"
+    name: "plus2_0_Greater"
+    op_type: "Greater"
+    domain: ""
+  }
+  node {
+    input: "path_length1_Y0"
+    input: "path_length0_Reshapecst"
+    output: "path_length1_reshaped0"
+    name: "path_length1_Reshape"
+    op_type: "Reshape"
+    attribute {
+      name: "allowzero"
+      i: 0
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "node_sample1_reshaped0"
+    input: "dec_Powcst"
+    output: "plus2_1_C0"
+    name: "plus2_1_Greater"
+    op_type: "Greater"
+    domain: ""
+  }
+  node {
+    input: "plus2_0_C0"
+    output: "plus2_0_output0"
+    name: "plus2_0_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 1
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "eq2_0_C0"
+    output: "eq2_0_output0"
+    name: "eq2_0_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 1
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "node_sample2_reshaped0"
+    input: "dec_Powcst"
+    output: "eq2_2_C0"
+    name: "eq2_2_Equal"
+    op_type: "Equal"
+    domain: ""
+  }
+  node {
+    input: "node_sample2_reshaped0"
+    input: "dec_Powcst"
+    output: "plus2_2_C0"
+    name: "plus2_2_Greater"
+    op_type: "Greater"
+    domain: ""
+  }
+  node {
+    input: "node_sample1_reshaped0"
+    input: "dec_Powcst"
+    output: "eq2_1_C0"
+    name: "eq2_1_Equal"
+    op_type: "Equal"
+    domain: ""
+  }
+  node {
+    input: "plus2_1_C0"
+    output: "plus2_1_output0"
+    name: "plus2_1_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 1
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "plus2_0_output0"
+    input: "node_sample0_reshaped0"
+    output: "eqp2ps0_C0"
+    name: "eqp2ps0_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "eq2_1_C0"
+    output: "eq2_1_output0"
+    name: "eq2_1_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 1
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "eq2_2_C0"
+    output: "eq2_2_output0"
+    name: "eq2_2_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 1
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "plus2_2_C0"
+    output: "plus2_2_output0"
+    name: "plus2_2_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 1
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "plus2_1_output0"
+    input: "node_sample1_reshaped0"
+    output: "eqp2ps1_C0"
+    name: "eqp2ps1_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "eqp2ps0_C0"
+    input: "eqp2p_m1_0_Addcst"
+    output: "eqp2p_m1_0_C0"
+    name: "eqp2p_m1_0_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "plus2_2_output0"
+    input: "node_sample2_reshaped0"
+    output: "eqp2ps2_C0"
+    name: "eqp2ps2_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "eqp2ps0_C0"
+    input: "eqp2p_m1_0_Maxcst"
+    output: "eqp_ns0_max0"
+    name: "eqp_ns0_Max"
+    op_type: "Max"
+    domain: ""
+  }
+  node {
+    input: "eqp2ps1_C0"
+    input: "eqp2p_m1_0_Addcst"
+    output: "eqp2p_m1_1_C0"
+    name: "eqp2p_m1_1_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "eqp2ps1_C0"
+    input: "eqp2p_m1_0_Maxcst"
+    output: "eqp_ns1_max0"
+    name: "eqp_ns1_Max"
+    op_type: "Max"
+    domain: ""
+  }
+  node {
+    input: "eqp2ps2_C0"
+    input: "eqp2p_m1_0_Addcst"
+    output: "eqp2p_m1_2_C0"
+    name: "eqp2p_m1_2_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "eqp2ps2_C0"
+    input: "eqp2p_m1_0_Maxcst"
+    output: "eqp_ns2_max0"
+    name: "eqp_ns2_Max"
+    op_type: "Max"
+    domain: ""
+  }
+  node {
+    input: "eqp2p_m1_0_C0"
+    input: "eqp2p_m1_0_Maxcst"
+    output: "eqp2p_m1_0_max0"
+    name: "eqp2p_m1_0_Max"
+    op_type: "Max"
+    domain: ""
+  }
+  node {
+    input: "eqp2p_m1_0_C0"
+    input: "eqp2p_m1_0_Maxcst1"
+    output: "eqp2p_m1_0_max02"
+    name: "eqp2p_m1_0_Max1"
+    op_type: "Max"
+    domain: ""
+  }
+  node {
+    input: "eqp2p_m1_1_C0"
+    input: "eqp2p_m1_0_Maxcst"
+    output: "eqp2p_m1_1_max0"
+    name: "eqp2p_m1_1_Max"
+    op_type: "Max"
+    domain: ""
+  }
+  node {
+    input: "eqp2p_m1_1_C0"
+    input: "eqp2p_m1_0_Maxcst1"
+    output: "eqp2p_m1_1_max02"
+    name: "eqp2p_m1_1_Max1"
+    op_type: "Max"
+    domain: ""
+  }
+  node {
+    input: "eqp2p_m1_0_max02"
+    input: "eqp_ns0_max0"
+    output: "eqp_ns0_C01"
+    name: "eqp_ns0_Div"
+    op_type: "Div"
+    domain: ""
+  }
+  node {
+    input: "eqp2p_m1_2_C0"
+    input: "eqp2p_m1_0_Maxcst1"
+    output: "eqp2p_m1_2_max02"
+    name: "eqp2p_m1_2_Max1"
+    op_type: "Max"
+    domain: ""
+  }
+  node {
+    input: "eqp2p_m1_2_C0"
+    input: "eqp2p_m1_0_Maxcst"
+    output: "eqp2p_m1_2_max0"
+    name: "eqp2p_m1_2_Max"
+    op_type: "Max"
+    domain: ""
+  }
+  node {
+    input: "eqp2p_m1_0_max0"
+    output: "eqp_log0_output0"
+    name: "eqp_log0_Log"
+    op_type: "Log"
+    domain: ""
+  }
+  node {
+    input: "eqp2p_m1_1_max0"
+    output: "eqp_log1_output0"
+    name: "eqp_log1_Log"
+    op_type: "Log"
+    domain: ""
+  }
+  node {
+    input: "eqp_log0_output0"
+    input: "eqp_log0_Addcst"
+    output: "eqp_log0_C01"
+    name: "eqp_log0_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "eqp_ns0_C01"
+    input: "eqp_ns0_Mulcst"
+    output: "eqp_ns0_C0"
+    name: "eqp_ns0_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "eqp2p_m1_1_max02"
+    input: "eqp_ns1_max0"
+    output: "eqp_ns1_C01"
+    name: "eqp_ns1_Div"
+    op_type: "Div"
+    domain: ""
+  }
+  node {
+    input: "eqp2p_m1_2_max0"
+    output: "eqp_log2_output0"
+    name: "eqp_log2_Log"
+    op_type: "Log"
+    domain: ""
+  }
+  node {
+    input: "eqp2p_m1_2_max02"
+    input: "eqp_ns2_max0"
+    output: "eqp_ns2_C01"
+    name: "eqp_ns2_Div"
+    op_type: "Div"
+    domain: ""
+  }
+  node {
+    input: "eqp_log1_output0"
+    input: "eqp_log0_Addcst"
+    output: "eqp_log1_C01"
+    name: "eqp_log1_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "eqp_ns2_C01"
+    input: "eqp_ns0_Mulcst"
+    output: "eqp_ns2_C0"
+    name: "eqp_ns2_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "eqp_log2_output0"
+    input: "eqp_log0_Addcst"
+    output: "eqp_log2_C01"
+    name: "eqp_log2_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "eqp_ns1_C01"
+    input: "eqp_ns0_Mulcst"
+    output: "eqp_ns1_C0"
+    name: "eqp_ns1_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "eqp_log0_C01"
+    input: "dec_Powcst"
+    output: "eqp_log0_C0"
+    name: "eqp_log0_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "eqp_log2_C01"
+    input: "dec_Powcst"
+    output: "eqp_log2_C0"
+    name: "eqp_log2_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "eqp_log1_C01"
+    input: "dec_Powcst"
+    output: "eqp_log1_C0"
+    name: "eqp_log1_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "eqp_log0_C0"
+    input: "eqp_ns0_C0"
+    output: "avlog0_C01"
+    name: "avlog0_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "eqp_log2_C0"
+    input: "eqp_ns2_C0"
+    output: "avlog2_C01"
+    name: "avlog2_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "eqp_log1_C0"
+    input: "eqp_ns1_C0"
+    output: "avlog1_C01"
+    name: "avlog1_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "avlog0_C01"
+    input: "plus2_0_output0"
+    output: "avlog0_C0"
+    name: "avlog0_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "avlog1_C01"
+    input: "plus2_1_output0"
+    output: "avlog1_C0"
+    name: "avlog1_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "eq2_0_output0"
+    input: "avlog0_C0"
+    output: "avpl0_C0"
+    name: "avpl0_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "avlog2_C01"
+    input: "plus2_2_output0"
+    output: "avlog2_C0"
+    name: "avlog2_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "eq2_1_output0"
+    input: "avlog1_C0"
+    output: "avpl1_C0"
+    name: "avpl1_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "path_length0_reshaped0"
+    input: "avpl0_C0"
+    output: "depth0_C01"
+    name: "depth0_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "eq2_2_output0"
+    input: "avlog2_C0"
+    output: "avpl2_C0"
+    name: "avpl2_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "path_length1_reshaped0"
+    input: "avpl1_C0"
+    output: "depth1_C01"
+    name: "depth1_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "path_length2_reshaped0"
+    input: "avpl2_C0"
+    output: "depth2_C01"
+    name: "depth2_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "depth0_C01"
+    input: "eqp2p_m1_0_Addcst"
+    output: "depth0_C0"
+    name: "depth0_Add1"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "depth2_C01"
+    input: "eqp2p_m1_0_Addcst"
+    output: "depth2_C0"
+    name: "depth2_Add1"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "depth1_C01"
+    input: "eqp2p_m1_0_Addcst"
+    output: "depth1_C0"
+    name: "depth1_Add1"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "depth0_C0"
+    input: "depth1_C0"
+    input: "depth2_C0"
+    output: "dec_sum0"
+    name: "dec_Sum"
+    op_type: "Sum"
+    domain: ""
+  }
+  node {
+    input: "dec_sum0"
+    input: "dec_Divcst"
+    output: "dec_C0"
+    name: "dec_Div"
+    op_type: "Div"
+    domain: ""
+  }
+  node {
+    input: "dec_C0"
+    output: "dec_Y01"
+    name: "dec_Neg"
+    op_type: "Neg"
+    domain: ""
+  }
+  node {
+    input: "dec_Powcst"
+    input: "dec_Y01"
+    output: "dec_Z0"
+    name: "dec_Pow"
+    op_type: "Pow"
+    domain: ""
+  }
+  node {
+    input: "dec_Z0"
+    output: "dec_Y0"
+    name: "dec_Neg1"
+    op_type: "Neg"
+    domain: ""
+  }
+  node {
+    input: "dec_Y0"
+    input: "dec_Addcst"
+    output: "scores"
+    name: "dec_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  node {
+    input: "scores"
+    input: "eqp2p_m1_0_Maxcst1"
+    output: "predict_C01"
+    name: "predict_Less"
+    op_type: "Less"
+    domain: ""
+  }
+  node {
+    input: "predict_C01"
+    output: "predict_output0"
+    name: "predict_Cast"
+    op_type: "Cast"
+    attribute {
+      name: "to"
+      i: 7
+      type: INT
+    }
+    domain: ""
+  }
+  node {
+    input: "predict_output0"
+    input: "predict_Mulcst"
+    output: "predict_C0"
+    name: "predict_Mul"
+    op_type: "Mul"
+    domain: ""
+  }
+  node {
+    input: "predict_C0"
+    input: "predict_Addcst"
+    output: "label"
+    name: "predict_Add"
+    op_type: "Add"
+    domain: ""
+  }
+  name: "ONNX(IsolationForest)"
+  initializer {
+    dims: 1
+    data_type: 1
+    float_data: 2.0
+    name: "dec_Powcst"
+  }
+  initializer {
+    dims: 2
+    data_type: 7
+    int64_data: 0
+    int64_data: 1
+    name: "node_sample0_Gathercst"
+  }
+  initializer {
+    dims: 2
+    data_type: 7
+    int64_data: -1
+    int64_data: 1
+    name: "path_length0_Reshapecst"
+  }
+  initializer {
+    dims: 1
+    data_type: 1
+    float_data: -1.0
+    name: "eqp2p_m1_0_Addcst"
+  }
+  initializer {
+    dims: 1
+    data_type: 1
+    float_data: 1.0
+    name: "eqp2p_m1_0_Maxcst"
+  }
+  initializer {
+    dims: 1
+    data_type: 1
+    float_data: 0.5772156715393066
+    name: "eqp_log0_Addcst"
+  }
+  initializer {
+    dims: 1
+    data_type: 1
+    float_data: 0.0
+    name: "eqp2p_m1_0_Maxcst1"
+  }
+  initializer {
+    dims: 1
+    data_type: 1
+    float_data: -2.0
+    name: "eqp_ns0_Mulcst"
+  }
+  initializer {
+    dims: 1
+    dims: 1
+    data_type: 1
+    float_data: 25.094013214111328
+    name: "dec_Divcst"
+  }
+  initializer {
+    dims: 1
+    data_type: 1
+    float_data: 0.5
+    name: "dec_Addcst"
+  }
+  initializer {
+    dims: 1
+    data_type: 7
+    int64_data: -2
+    name: "predict_Mulcst"
+  }
+  initializer {
+    dims: 1
+    data_type: 7
+    int64_data: 1
+    name: "predict_Addcst"
+  }
+  input {
+    name: "X"
+    type {
+      tensor_type {
+        elem_type: 1
+        shape {
+          dim {
+          }
+          dim {
+            dim_value: 2
+          }
+        }
+      }
+    }
+  }
+  output {
+    name: "label"
+    type {
+      tensor_type {
+        elem_type: 7
+        shape {
+          dim {
+          }
+          dim {
+            dim_value: 1
+          }
+        }
+      }
+    }
+  }
+  output {
+    name: "scores"
+    type {
+      tensor_type {
+        elem_type: 1
+        shape {
+          dim {
+          }
+          dim {
+            dim_value: 1
+          }
+        }
+      }
+    }
+  }
+}
+opset_import {
+  domain: ""
+  version: 15
+}
+opset_import {
+  domain: "ai.onnx.ml"
+  version: 2
+}
+
+
+

The last line shows the opsets. Let's extract them.

+
domains = onx.opset_import
+for dom in domains:
+    print("domain: %r, version: %r" % (dom.domain, dom.version))
+
+
+
domain: '', version: 15
+domain: 'ai.onnx.ml', version: 2
+
+
+

There are two opsets: one for standard operators, the other for machine learning operators.
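If only one domain matters, a small helper (not part of skl2onnx, just a convenience sketch) avoids looping every time:

def opset_of(model, domain=""):
    # return the declared version for *domain*, or None if it is absent
    for op in model.opset_import:
        if op.domain == domain:
            return op.version
    return None

print(opset_of(onx))                # 15
print(opset_of(onx, "ai.onnx.ml"))  # 2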

+
+
+

ONNX and opset#

+

The converter can convert a model to an older opset than the default one, from 1 to the latest available one.

+
def get_domain_opset(onx):
+    domains = onx.opset_import
+    res = [{'domain': dom.domain, 'version': dom.version}
+           for dom in domains]
+    return {d['domain']: d['version'] for d in res}
+
+
+for opset in range(6, onnx_opset_version() + 1):
+    try:
+        onx = to_onnx(model, X[:1].astype(numpy.float32),
+                      target_opset={'': opset, 'ai.onnx.ml': 2})
+    except RuntimeError as e:
+        print('target: %r error: %r' % (opset, e))
+        continue
+    nodes = len(onx.graph.node)
+    print('target: %r --> %s %d' % (opset, get_domain_opset(onx), nodes))
+
+
+
target: 6 --> {'ai.onnx.ml': 2, '': 6} 91
+target: 7 --> {'': 7, 'ai.onnx.ml': 2} 91
+target: 8 --> {'ai.onnx.ml': 2, '': 8} 91
+target: 9 --> {'ai.onnx.ml': 2, '': 9} 91
+target: 10 --> {'ai.onnx.ml': 2, '': 10} 91
+target: 11 --> {'ai.onnx.ml': 2, '': 11} 91
+target: 12 --> {'': 12, 'ai.onnx.ml': 2} 91
+target: 13 --> {'': 13, 'ai.onnx.ml': 2} 91
+target: 14 --> {'': 14, 'ai.onnx.ml': 2} 91
+target: 15 --> {'': 15, 'ai.onnx.ml': 2} 91
+target: 16 --> {'': 16, 'ai.onnx.ml': 2} 91
+target: 17 --> {'': 17, 'ai.onnx.ml': 2} 91
+target: 18 --> {'': 18, 'ai.onnx.ml': 2} 91
+/home/xadupre/github/sklearn-onnx/skl2onnx/common/_topology.py:1405: UserWarning: Parameter target_opset 19 > 18 is higher than the the latest tested version.
+  warnings.warn(
+target: 19 error: RuntimeError("The model is using version 19 of domain '' not supported yet by this library. You need to specify target_opset={'': 18}.")
+
+
+

It shows that the model cannot be converted for an opset below 5. Operator Reshape changed in opset 5: what used to be an attribute became an input. The converter does not support opset < 5 because runtimes usually do not either.
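The change is easy to see when building the node by hand with the onnx helper API (an illustrative sketch only; most runtimes reject the pre-opset-5 form):

from onnx import helper

# opset >= 5: the target shape is a second input
reshape_new = helper.make_node("Reshape", ["data", "shape"], ["reshaped"])
# opset < 5: the target shape was an attribute of the node
reshape_old = helper.make_node("Reshape", ["data"], ["reshaped"], shape=[-1, 1])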

+
+
+

Other opsets#

+

The previous example changed the opset of the main domain '', but the opset of the other domains can be changed as well.

+
for opset in range(9, onnx_opset_version() + 1):
+    for opset_ml in range(1, 4):
+        tops = {'': opset, 'ai.onnx.ml': opset_ml}
+        try:
+            print("try target_opset:", tops)
+            onx = to_onnx(
+                model, X[:1].astype(numpy.float32), target_opset=tops)
+        except RuntimeError as e:
+            print('target: %r error: %r' % (opset, e))
+            continue
+        nodes = len(onx.graph.node)
+        print('target: %r --> %s %d' % (opset, get_domain_opset(onx), nodes))
+
+
+
try target_opset: {'': 9, 'ai.onnx.ml': 1}
+target: 9 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
+try target_opset: {'': 9, 'ai.onnx.ml': 2}
+target: 9 --> {'ai.onnx.ml': 2, '': 9} 91
+try target_opset: {'': 9, 'ai.onnx.ml': 3}
+target: 9 --> {'ai.onnx.ml': 2, '': 9} 91
+try target_opset: {'': 10, 'ai.onnx.ml': 1}
+target: 10 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
+try target_opset: {'': 10, 'ai.onnx.ml': 2}
+target: 10 --> {'ai.onnx.ml': 2, '': 10} 91
+try target_opset: {'': 10, 'ai.onnx.ml': 3}
+target: 10 --> {'ai.onnx.ml': 2, '': 10} 91
+try target_opset: {'': 11, 'ai.onnx.ml': 1}
+target: 11 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
+try target_opset: {'': 11, 'ai.onnx.ml': 2}
+target: 11 --> {'ai.onnx.ml': 2, '': 11} 91
+try target_opset: {'': 11, 'ai.onnx.ml': 3}
+target: 11 --> {'ai.onnx.ml': 2, '': 11} 91
+try target_opset: {'': 12, 'ai.onnx.ml': 1}
+target: 12 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
+try target_opset: {'': 12, 'ai.onnx.ml': 2}
+target: 12 --> {'': 12, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 12, 'ai.onnx.ml': 3}
+target: 12 --> {'': 12, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 13, 'ai.onnx.ml': 1}
+target: 13 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
+try target_opset: {'': 13, 'ai.onnx.ml': 2}
+target: 13 --> {'': 13, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 13, 'ai.onnx.ml': 3}
+target: 13 --> {'': 13, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 14, 'ai.onnx.ml': 1}
+target: 14 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
+try target_opset: {'': 14, 'ai.onnx.ml': 2}
+target: 14 --> {'': 14, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 14, 'ai.onnx.ml': 3}
+target: 14 --> {'': 14, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 15, 'ai.onnx.ml': 1}
+target: 15 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
+try target_opset: {'': 15, 'ai.onnx.ml': 2}
+target: 15 --> {'': 15, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 15, 'ai.onnx.ml': 3}
+target: 15 --> {'': 15, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 16, 'ai.onnx.ml': 1}
+target: 16 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
+try target_opset: {'': 16, 'ai.onnx.ml': 2}
+target: 16 --> {'': 16, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 16, 'ai.onnx.ml': 3}
+target: 16 --> {'': 16, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 17, 'ai.onnx.ml': 1}
+target: 17 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
+try target_opset: {'': 17, 'ai.onnx.ml': 2}
+target: 17 --> {'': 17, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 17, 'ai.onnx.ml': 3}
+target: 17 --> {'': 17, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 18, 'ai.onnx.ml': 1}
+target: 18 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
+try target_opset: {'': 18, 'ai.onnx.ml': 2}
+target: 18 --> {'': 18, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 18, 'ai.onnx.ml': 3}
+target: 18 --> {'': 18, 'ai.onnx.ml': 2} 91
+try target_opset: {'': 19, 'ai.onnx.ml': 1}
+/home/xadupre/github/sklearn-onnx/skl2onnx/common/_topology.py:1405: UserWarning: Parameter target_opset 19 > 18 is higher than the the latest tested version.
+  warnings.warn(
+target: 19 error: RuntimeError("This converter requires at least opset 2 for domain 'ai.onnx.ml'.")
+try target_opset: {'': 19, 'ai.onnx.ml': 2}
+target: 19 error: RuntimeError("The model is using version 19 of domain '' not supported yet by this library. You need to specify target_opset={'': 18}.")
+try target_opset: {'': 19, 'ai.onnx.ml': 3}
+/home/xadupre/github/sklearn-onnx/skl2onnx/common/_topology.py:1405: UserWarning: Parameter target_opset 19 > 18 is higher than the the latest tested version.
+  warnings.warn(
+target: 19 error: RuntimeError("The model is using version 19 of domain '' not supported yet by this library. You need to specify target_opset={'': 18}.")
+
+
+
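In practice, pinning the latest tested opset avoids both the warning and the failure above. A defensive pattern could look like this (18 being the latest tested version when this page was generated):

from onnx.defs import onnx_opset_version

LATEST_TESTED = 18  # adjust when upgrading skl2onnx
target = {'': min(onnx_opset_version(), LATEST_TESTED), 'ai.onnx.ml': 2}
onx = to_onnx(model, X[:1].astype(numpy.float32), target_opset=target)
print(get_domain_opset(onx))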

Total running time of the script: ( 0 minutes 1.345 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+ +
+ +
+
\ No newline at end of file
diff --git a/auto_tutorial/plot_dbegin_options.html b/auto_tutorial/plot_dbegin_options.html
index 0272f13dc..7b83a6fc2 100644
--- a/auto_tutorial/plot_dbegin_options.html
+++ b/auto_tutorial/plot_dbegin_options.html
@@ -1,938 +1,786 @@
-One model, many possible conversions with options — sklearn-onnx 1.11.2 documentation

One model, many possible conversions with options#

-

There is more than one way to convert a model. A new operator may have been added in a newer version of ONNX that speeds up the converted model. The rational choice would be to use this new operator, but that assumes the runtime has an implementation for it. What if two different users need two different conversions of the same model? Let's see how this can be done.

- -
-

Option zipmap#

-

Every classifier is by design converted into an ONNX graph which outputs two results: the predicted label and the predicted probability for every label. By default, the labels are integers and the probabilities are stored in dictionaries. That's the purpose of operator ZipMap, added at the end of the following graph.

- -
- -

This operator is not really efficient as it copies every probability and label into a different container. This overhead is usually significant for small classifiers, so it makes sense to remove it.

- -
- -

A graph may contain many classifiers, so it is important to have a way to specify which classifier should keep its ZipMap and which should not. That is why options can be specified by id.

-
from pprint import pformat
-import numpy
-from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
-from sklearn.ensemble import RandomForestClassifier
-from sklearn.preprocessing import MinMaxScaler
-from sklearn.pipeline import Pipeline
-from sklearn.datasets import load_iris
-from sklearn.model_selection import train_test_split
-from sklearn.linear_model import LogisticRegression
-from skl2onnx.common._registration import _converter_pool
-from skl2onnx import to_onnx
-from onnxruntime import InferenceSession
-from mlprodict.onnxrt import OnnxInference
-
-iris = load_iris()
-X, y = iris.data, iris.target
-X_train, X_test, y_train, _ = train_test_split(X, y, random_state=11)
-clr = LogisticRegression()
-clr.fit(X_train, y_train)
-
-model_def = to_onnx(clr, X_train.astype(numpy.float32),
-                    options={id(clr): {'zipmap': False}})
-oinf = OnnxInference(model_def, runtime='python_compiled')
-print(oinf)
-
-
-

Out:

-
D:\Program Files\Python\Python39\lib\site-packages\sklearn\linear_model\_logistic.py:444: ConvergenceWarning: lbfgs failed to converge (status=1):
-STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
-
-Increase the number of iterations (max_iter) or scale the data as shown in:
-    https://scikit-learn.org/stable/modules/preprocessing.html
-Please also refer to the documentation for alternative solver options:
-    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
-  n_iter_i = _check_optimize_result(
-OnnxInference(...)
-    def compiled_run(dict_inputs, yield_ops=None, context=None):
-        if yield_ops is not None:
-            raise NotImplementedError('yields_ops should be None.')
-        # inputs
-        X = dict_inputs['X']
-        (label, probability_tensor, ) = n0_linearclassifier(X)
-        (probabilities, ) = n1_normalizer(probability_tensor)
-        return {
-            'label': label,
-            'probabilities': probabilities,
-        }
-
-
-

Visually.

-
ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot dbegin options

We need to compare that kind of visualisation with what it would give with operator ZipMap.

-
model_def = to_onnx(clr, X_train.astype(numpy.float32))
-oinf = OnnxInference(model_def, runtime='python_compiled')
-print(oinf)
-
-
-

Out:

-
OnnxInference(...)
-    def compiled_run(dict_inputs, yield_ops=None, context=None):
-        if yield_ops is not None:
-            raise NotImplementedError('yields_ops should be None.')
-        # inputs
-        X = dict_inputs['X']
-        (label, probability_tensor, ) = n0_linearclassifier(X)
-        (output_label, ) = n1_cast(label)
-        (probabilities, ) = n2_normalizer(probability_tensor)
-        (output_probability, ) = n3_zipmap(probabilities)
-        return {
-            'output_label': output_label,
-            'output_probability': output_probability,
-        }
-
-
-

Visually.

-
ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot dbegin options

Using function id has one flaw: it is not picklable. It is just better to use strings.

-
model_def = to_onnx(clr, X_train.astype(numpy.float32),
-                    options={'zipmap': False})
-oinf = OnnxInference(model_def, runtime='python_compiled')
-print(oinf)
-
-
-

Out:

-
OnnxInference(...)
-    def compiled_run(dict_inputs, yield_ops=None, context=None):
-        if yield_ops is not None:
-            raise NotImplementedError('yields_ops should be None.')
-        # inputs
-        X = dict_inputs['X']
-        (label, probability_tensor, ) = n0_linearclassifier(X)
-        (probabilities, ) = n1_normalizer(probability_tensor)
-        return {
-            'label': label,
-            'probabilities': probabilities,
-        }
-
-
-

Visually.

-
ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot dbegin options
-
-

Option in a pipeline#

-

In a pipeline, sklearn-onnx uses the same naming convention as scikit-learn: the option name is prefixed with the step name followed by a double underscore.

-
pipe = Pipeline([
-    ('norm', MinMaxScaler()),
-    ('clr', LogisticRegression())
-])
-pipe.fit(X_train, y_train)
-
-model_def = to_onnx(pipe, X_train.astype(numpy.float32),
-                    options={'clr__zipmap': False})
-oinf = OnnxInference(model_def, runtime='python_compiled')
-print(oinf)
-
-
-

Out:

-
OnnxInference(...)
-    def compiled_run(dict_inputs, yield_ops=None, context=None):
-        if yield_ops is not None:
-            raise NotImplementedError('yields_ops should be None.')
-        # init: Ad_Addcst (Ad_Addcst)
-        # init: Mu_Mulcst (Mu_Mulcst)
-        # inputs
-        X = dict_inputs['X']
-        (Ca_output0, ) = n0_cast(X)
-        (Mu_C0, ) = n1_mul(Ca_output0, Mu_Mulcst)
-        (variable, ) = n2_add(Mu_C0, Ad_Addcst)
-        (label, probability_tensor, ) = n3_linearclassifier(variable)
-        (probabilities, ) = n4_normalizer(probability_tensor)
-        return {
-            'label': label,
-            'probabilities': probabilities,
-        }
-
-
-

Visually.

-
ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot dbegin options
-
-

Option raw_scores#

-

Every classifier is converted into a graph which returns probabilities by default. But many models also compute unscaled raw_scores. First, with probabilities:

-
pipe = Pipeline([
-    ('norm', MinMaxScaler()),
-    ('clr', LogisticRegression())
-])
-pipe.fit(X_train, y_train)
-
-model_def = to_onnx(
-    pipe, X_train.astype(numpy.float32),
-    options={id(pipe): {'zipmap': False}})
-
-oinf = OnnxInference(model_def, runtime='python_compiled')
-print(oinf.run({'X': X.astype(numpy.float32)[:5]}))
-
-
-

Out:

-
{'label': array([0, 0, 0, 0, 0], dtype=int64), 'probabilities': array([[0.88268626, 0.10948393, 0.00782984],
-       [0.7944385 , 0.19728662, 0.00827491],
-       [0.85557765, 0.13792053, 0.00650185],
-       [0.8262804 , 0.16634221, 0.00737737],
-       [0.90050155, 0.092388  , 0.00711049]], dtype=float32)}
-
-
-

Then with raw scores:

-
model_def = to_onnx(
-    pipe, X_train.astype(numpy.float32),
-    options={id(pipe): {'raw_scores': True, 'zipmap': False}})
-
-oinf = OnnxInference(model_def, runtime='python_compiled')
-print(oinf.run({'X': X.astype(numpy.float32)[:5]}))
-
-
-

Out:

-
{'label': array([0, 0, 0, 0, 0], dtype=int64), 'probabilities': array([[0.88268626, 0.10948393, 0.00782984],
-       [0.7944385 , 0.19728662, 0.00827491],
-       [0.85557765, 0.13792053, 0.00650185],
-       [0.8262804 , 0.16634221, 0.00737737],
-       [0.90050155, 0.092388  , 0.00711049]], dtype=float32)}
-
-
-

It did not seem to work… We need to specify that the option applies to a specific part of the pipeline and not to the whole pipeline.

-
model_def = to_onnx(
-    pipe, X_train.astype(numpy.float32),
-    options={id(pipe.steps[1][1]): {'raw_scores': True, 'zipmap': False}})
-
-oinf = OnnxInference(model_def, runtime='python_compiled')
-print(oinf.run({'X': X.astype(numpy.float32)[:5]}))
-
-
-

Out:

-
{'label': array([0, 0, 0, 0, 0], dtype=int64), 'probabilities': array([[ 2.2707398 ,  0.18354762, -2.4542873 ],
-       [ 1.9857951 ,  0.5928172 , -2.5786123 ],
-       [ 2.2349296 ,  0.4098304 , -2.6447601 ],
-       [ 2.1071343 ,  0.5042473 , -2.6113818 ],
-       [ 2.3727787 ,  0.095824  , -2.4686027 ]], dtype=float32)}
-
-
-

There are negative values. That works. Strings are still easier to use.

-
model_def = to_onnx(
-    pipe, X_train.astype(numpy.float32),
-    options={'clr__raw_scores': True, 'clr__zipmap': False})
-
-oinf = OnnxInference(model_def, runtime='python_compiled')
-print(oinf.run({'X': X.astype(numpy.float32)[:5]}))
-
-
-

Out:

-
{'label': array([0, 0, 0, 0, 0], dtype=int64), 'probabilities': array([[ 2.2707398 ,  0.18354762, -2.4542873 ],
-       [ 1.9857951 ,  0.5928172 , -2.5786123 ],
-       [ 2.2349296 ,  0.4098304 , -2.6447601 ],
-       [ 2.1071343 ,  0.5042473 , -2.6113818 ],
-       [ 2.3727787 ,  0.095824  , -2.4686027 ]], dtype=float32)}
-
-
-

Negative figures. We still have raw scores.

-
-
-

Option decision_path#

-

scikit-learn implements a function to retrieve the decision path. It can be enabled with option decision_path.

-
clrrf = RandomForestClassifier(n_estimators=2, max_depth=2)
-clrrf.fit(X_train, y_train)
-clrrf.predict(X_test[:2])
-paths, n_nodes_ptr = clrrf.decision_path(X_test[:2])
-print(paths.todense())
-
-model_def = to_onnx(clrrf, X_train.astype(numpy.float32),
-                    options={id(clrrf): {'decision_path': True,
-                                         'zipmap': False}})
-sess = InferenceSession(model_def.SerializeToString())
-
-
-

Out:

-
[[1 0 1 0 1 1 0 1 0 1]
- [1 0 1 0 1 1 0 1 0 1]]
-
-
-

The model produces 3 outputs.

-
print([o.name for o in sess.get_outputs()])
-
-
-

Out:

-
['label', 'probabilities', 'decision_path']
-
-
-

Let’s display the last one.

-
res = sess.run(None, {'X': X_test[:2].astype(numpy.float32)})
-print(res[-1])
-
-
-

Out:

-
[['10101' '10101']
- ['10101' '10101']]
-
-
-
-
-

List of available options#

-

Options are registered for every converter, which makes it possible to detect unsupported options while running the conversion.

-
all_opts = set()
-for k, v in sorted(_converter_pool.items()):
-    opts = v.get_allowed_options()
-    if not isinstance(opts, dict):
-        continue
-    name = k.replace('Sklearn', '')
-    print('%s%s %r' % (name, " " * (30 - len(name)), opts))
-    for o in opts:
-        all_opts.add(o)
-
-print('all options:', pformat(list(sorted(all_opts))))
-
-
-

Out:

-
LightGbmLGBMClassifier         {'nocl': [True, False], 'zipmap': [True, False, 'columns']}
-LightGbmLGBMRegressor          {'split': None}
-AdaBoostClassifier             {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
-BaggingClassifier              {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
-BayesianGaussianMixture        {'score_samples': [True, False]}
-BayesianRidge                  {'return_std': [True, False]}
-BernoulliNB                    {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
-CalibratedClassifierCV         {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
-CategoricalNB                  {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
-ComplementNB                   {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
-CountVectorizer                {'tokenexp': None, 'separators': None, 'nan': [True, False], 'keep_empty_string': [True, False]}
-DecisionTreeClassifier         {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'decision_path': [True, False], 'decision_leaf': [True, False]}
-DecisionTreeRegressor          {'decision_path': [True, False], 'decision_leaf': [True, False]}
-ExtraTreeClassifier            {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'decision_path': [True, False], 'decision_leaf': [True, False]}
-ExtraTreeRegressor             {'decision_path': [True, False], 'decision_leaf': [True, False]}
-ExtraTreesClassifier           {'zipmap': [True, False, 'columns'], 'raw_scores': [True, False], 'nocl': [True, False], 'output_class_labels': [False, True], 'decision_path': [True, False], 'decision_leaf': [True, False]}
-ExtraTreesRegressor            {'decision_path': [True, False], 'decision_leaf': [True, False]}
-GaussianMixture                {'score_samples': [True, False]}
-GaussianNB                     {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
-GaussianProcessClassifier      {'optim': [None, 'cdist'], 'nocl': [False, True], 'output_class_labels': [False, True], 'zipmap': [False, True]}
-GaussianProcessRegressor       {'return_cov': [False, True], 'return_std': [False, True], 'optim': [None, 'cdist']}
-GradientBoostingClassifier     {'zipmap': [True, False, 'columns'], 'raw_scores': [True, False], 'output_class_labels': [False, True], 'nocl': [True, False]}
-HistGradientBoostingClassifier {'zipmap': [True, False, 'columns'], 'raw_scores': [True, False], 'output_class_labels': [False, True], 'nocl': [True, False]}
-HistGradientBoostingRegressor  {'zipmap': [True, False, 'columns'], 'raw_scores': [True, False], 'output_class_labels': [False, True], 'nocl': [True, False]}
-IsolationForest                {'score_samples': [True, False]}
-KMeans                         {'gemm': [True, False]}
-KNNImputer                     {'optim': [None, 'cdist']}
-KNeighborsClassifier           {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'raw_scores': [True, False], 'output_class_labels': [False, True], 'optim': [None, 'cdist']}
-KNeighborsRegressor            {'optim': [None, 'cdist']}
-KNeighborsTransformer          {'optim': [None, 'cdist']}
-KernelPCA                      {'optim': [None, 'cdist']}
-LinearClassifier               {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
-LinearSVC                      {'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
-LocalOutlierFactor             {'score_samples': [True, False], 'optim': [None, 'cdist']}
-MLPClassifier                  {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
-MaxAbsScaler                   {'div': ['std', 'div', 'div_cast']}
-MiniBatchKMeans                {'gemm': [True, False]}
-MultiOutputClassifier          {'nocl': [False, True], 'output_class_labels': [False, True], 'zipmap': [False, True]}
-MultinomialNB                  {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
-NearestNeighbors               {'optim': [None, 'cdist']}
-OneVsRestClassifier            {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
-RadiusNeighborsClassifier      {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'raw_scores': [True, False], 'output_class_labels': [False, True], 'optim': [None, 'cdist']}
-RadiusNeighborsRegressor       {'optim': [None, 'cdist']}
-RandomForestClassifier         {'zipmap': [True, False, 'columns'], 'raw_scores': [True, False], 'nocl': [True, False], 'output_class_labels': [False, True], 'decision_path': [True, False], 'decision_leaf': [True, False]}
-RandomForestRegressor          {'decision_path': [True, False], 'decision_leaf': [True, False]}
-RobustScaler                   {'div': ['std', 'div', 'div_cast']}
-SGDClassifier                  {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
-SVC                            {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
-Scaler                         {'div': ['std', 'div', 'div_cast']}
-StackingClassifier             {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
-TfidfTransformer               {'nan': [True, False]}
-TfidfVectorizer                {'tokenexp': None, 'separators': None, 'nan': [True, False], 'keep_empty_string': [True, False]}
-VotingClassifier               {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
-XGBoostXGBClassifier           {'nocl': [True, False], 'zipmap': [True, False, 'columns']}
-all options: ['decision_leaf',
- 'decision_path',
- 'div',
- 'gemm',
- 'keep_empty_string',
- 'nan',
- 'nocl',
- 'optim',
- 'output_class_labels',
- 'raw_scores',
- 'return_cov',
- 'return_std',
- 'score_samples',
- 'separators',
- 'split',
- 'tokenexp',
- 'zipmap']
-
-
-

Total running time of the script: ( 0 minutes 1.810 seconds)

- -

Gallery generated by Sphinx-Gallery

+One model, many possible conversions with options - sklearn-onnx 1.14.0 documentation

One model, many possible conversions with options#

+

There is more than one way to convert a model. A new operator may have been added in a newer version of ONNX that speeds up the converted model. The rational choice would be to use this new operator, but that assumes the runtime has an implementation for it. What if two different users need two different conversions of the same model? Let's see how this can be done.

+
+

Option zipmap#

+

Every classifier is by design converted into an ONNX graph which outputs two results: the predicted label and the predicted probability for every label. By default, the labels are integers and the probabilities are stored in dictionaries. That's the purpose of operator ZipMap, added at the end of the following graph.

+ +
+ +

This operator is not really efficient as it copies every probability and label into a different container. This overhead is usually significant for small classifiers, so it makes sense to remove it.

+ +
+ +

A graph may contain many classifiers, so it is important to have a way to specify which classifier should keep its ZipMap and which should not. That is why options can be specified by id.

+
from pprint import pformat
+import numpy
+from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+from sklearn.ensemble import RandomForestClassifier
+from sklearn.preprocessing import MinMaxScaler
+from sklearn.pipeline import Pipeline
+from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
+from sklearn.linear_model import LogisticRegression
+from skl2onnx.common._registration import _converter_pool
+from skl2onnx import to_onnx
+from onnxruntime import InferenceSession
+from mlprodict.onnxrt import OnnxInference
+
+iris = load_iris()
+X, y = iris.data, iris.target
+X_train, X_test, y_train, _ = train_test_split(X, y, random_state=11)
+clr = LogisticRegression()
+clr.fit(X_train, y_train)
+
+model_def = to_onnx(clr, X_train.astype(numpy.float32),
+                    options={id(clr): {'zipmap': False}})
+oinf = OnnxInference(model_def, runtime='python_compiled')
+print(oinf)
+
+
+
/home/xadupre/github/scikit-learn/sklearn/linear_model/_logistic.py:458: ConvergenceWarning: lbfgs failed to converge (status=1):
+STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
+
+Increase the number of iterations (max_iter) or scale the data as shown in:
+    https://scikit-learn.org/stable/modules/preprocessing.html
+Please also refer to the documentation for alternative solver options:
+    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
+  n_iter_i = _check_optimize_result(
+OnnxInference(...)
+    def compiled_run(dict_inputs, yield_ops=None, context=None, attributes=None):
+        if yield_ops is not None:
+            raise NotImplementedError('yields_ops should be None.')
+        # inputs
+        X = dict_inputs['X']
+        (label, probability_tensor, ) = n0_linearclassifier(X)
+        (probabilities, ) = n1_normalizer(probability_tensor)
+        return {
+            'label': label,
+            'probabilities': probabilities,
+        }
+
+
+
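Without ZipMap, the second output is a plain float tensor, so onnxruntime returns it directly as a numpy array. A quick check (reusing the InferenceSession import above):

sess = InferenceSession(model_def.SerializeToString())
print(sess.run(None, {'X': X_test[:2].astype(numpy.float32)})[1])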

Visually.

+
ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot dbegin options

We need to compare that kind of visualisation with what it would give with operator ZipMap.

+
model_def = to_onnx(clr, X_train.astype(numpy.float32))
+oinf = OnnxInference(model_def, runtime='python_compiled')
+print(oinf)
+
+
+
OnnxInference(...)
+    def compiled_run(dict_inputs, yield_ops=None, context=None, attributes=None):
+        if yield_ops is not None:
+            raise NotImplementedError('yields_ops should be None.')
+        # inputs
+        X = dict_inputs['X']
+        (label, probability_tensor, ) = n0_linearclassifier(X)
+        (output_label, ) = n1_cast(label)
+        (probabilities, ) = n2_normalizer(probability_tensor)
+        (output_probability, ) = n3_zipmap(probabilities)
+        return {
+            'output_label': output_label,
+            'output_probability': output_probability,
+        }
+
+
+
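With ZipMap kept, the same probabilities come back as one dictionary per row, which is precisely the copy the option removes. A sketch (the exact figures depend on the trained model):

sess = InferenceSession(model_def.SerializeToString())
proba_dicts = sess.run(None, {'X': X_test[:2].astype(numpy.float32)})[1]
print(proba_dicts)  # a list such as [{0: 0.97, 1: 0.02, 2: 0.01}, ...]
print(numpy.array([[d[i] for i in sorted(d)] for d in proba_dicts]))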

Visually.

+
ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot dbegin options

Using function id has one flaw: it is not picklable. It is just better to use strings.

+
model_def = to_onnx(clr, X_train.astype(numpy.float32),
+                    options={'zipmap': False})
+oinf = OnnxInference(model_def, runtime='python_compiled')
+print(oinf)
+
+
+
OnnxInference(...)
+    def compiled_run(dict_inputs, yield_ops=None, context=None, attributes=None):
+        if yield_ops is not None:
+            raise NotImplementedError('yields_ops should be None.')
+        # inputs
+        X = dict_inputs['X']
+        (label, probability_tensor, ) = n0_linearclassifier(X)
+        (probabilities, ) = n1_normalizer(probability_tensor)
+        return {
+            'label': label,
+            'probabilities': probabilities,
+        }
+
+
+

Visually.

+
ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot dbegin options
+
+

Option in a pipeline#

+

In a pipeline, sklearn-onnx uses the same naming convention as scikit-learn: the option name is prefixed with the step name followed by a double underscore.

+
pipe = Pipeline([
+    ('norm', MinMaxScaler()),
+    ('clr', LogisticRegression())
+])
+pipe.fit(X_train, y_train)
+
+model_def = to_onnx(pipe, X_train.astype(numpy.float32),
+                    options={'clr__zipmap': False})
+oinf = OnnxInference(model_def, runtime='python_compiled')
+print(oinf)
+
+
+
OnnxInference(...)
+    def compiled_run(dict_inputs, yield_ops=None, context=None, attributes=None):
+        if yield_ops is not None:
+            raise NotImplementedError('yields_ops should be None.')
+        # init: Ad_Addcst (Ad_Addcst)
+        # init: Mu_Mulcst (Mu_Mulcst)
+        # inputs
+        X = dict_inputs['X']
+        (Ca_output0, ) = n0_cast(X)
+        (Mu_C0, ) = n1_mul(Ca_output0, Mu_Mulcst)
+        (variable, ) = n2_add(Mu_C0, Ad_Addcst)
+        (label, probability_tensor, ) = n3_linearclassifier(variable)
+        (probabilities, ) = n4_normalizer(probability_tensor)
+        return {
+            'label': label,
+            'probabilities': probabilities,
+        }
+
+
+
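The same step can also be addressed by instance instead of by name, which is what the raw_scores example below does:

model_def = to_onnx(pipe, X_train.astype(numpy.float32),
                    options={id(pipe.steps[1][1]): {'zipmap': False}})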

Visually.

+
ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot dbegin options
+
+

Option raw_scores#

+

Every classifier is converted into a graph which returns probabilities by default. But many models also compute unscaled raw_scores. First, with probabilities:

+
pipe = Pipeline([
+    ('norm', MinMaxScaler()),
+    ('clr', LogisticRegression())
+])
+pipe.fit(X_train, y_train)
+
+model_def = to_onnx(
+    pipe, X_train.astype(numpy.float32),
+    options={id(pipe): {'zipmap': False}})
+
+oinf = OnnxInference(model_def, runtime='python_compiled')
+print(oinf.run({'X': X.astype(numpy.float32)[:5]}))
+
+
+
{'label': array([0, 0, 0, 0, 0]), 'probabilities': array([[0.88268626, 0.10948393, 0.00782984],
+       [0.7944385 , 0.19728662, 0.00827491],
+       [0.85557765, 0.13792053, 0.00650185],
+       [0.8262804 , 0.16634221, 0.00737737],
+       [0.90050155, 0.092388  , 0.00711049]], dtype=float32)}
+
+
+

Then with raw scores:

+
model_def = to_onnx(
+    pipe, X_train.astype(numpy.float32),
+    options={id(pipe): {'raw_scores': True, 'zipmap': False}})
+
+oinf = OnnxInference(model_def, runtime='python_compiled')
+print(oinf.run({'X': X.astype(numpy.float32)[:5]}))
+
+
+
{'label': array([0, 0, 0, 0, 0]), 'probabilities': array([[0.88268626, 0.10948393, 0.00782984],
+       [0.7944385 , 0.19728662, 0.00827491],
+       [0.85557765, 0.13792053, 0.00650185],
+       [0.8262804 , 0.16634221, 0.00737737],
+       [0.90050155, 0.092388  , 0.00711049]], dtype=float32)}
+
+
+

It did not seem to work… We need to specify that the option applies to a specific part of the pipeline and not to the whole pipeline.

+
model_def = to_onnx(
+    pipe, X_train.astype(numpy.float32),
+    options={id(pipe.steps[1][1]): {'raw_scores': True, 'zipmap': False}})
+
+oinf = OnnxInference(model_def, runtime='python_compiled')
+print(oinf.run({'X': X.astype(numpy.float32)[:5]}))
+
+
+
{'label': array([0, 0, 0, 0, 0]), 'probabilities': array([[ 2.2707398 ,  0.18354762, -2.4542873 ],
+       [ 1.9857951 ,  0.5928172 , -2.5786123 ],
+       [ 2.2349296 ,  0.4098304 , -2.6447601 ],
+       [ 2.1071343 ,  0.5042473 , -2.6113818 ],
+       [ 2.3727787 ,  0.095824  , -2.4686027 ]], dtype=float32)}
+
+
+

There are negative values. That works. Strings are still easier to use.

+
model_def = to_onnx(
+    pipe, X_train.astype(numpy.float32),
+    options={'clr__raw_scores': True, 'clr__zipmap': False})
+
+oinf = OnnxInference(model_def, runtime='python_compiled')
+print(oinf.run({'X': X.astype(numpy.float32)[:5]}))
+
+
+
{'label': array([0, 0, 0, 0, 0]), 'probabilities': array([[ 2.2707398 ,  0.18354762, -2.4542873 ],
+       [ 1.9857951 ,  0.5928172 , -2.5786123 ],
+       [ 2.2349296 ,  0.4098304 , -2.6447601 ],
+       [ 2.1071343 ,  0.5042473 , -2.6113818 ],
+       [ 2.3727787 ,  0.095824  , -2.4686027 ]], dtype=float32)}
+
+
+

Negative figures. We still have raw scores.
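For this logistic regression, applying a softmax to the raw scores recovers the probabilities, which is an easy consistency check between the two conversions (a numpy sketch; scipy.special.softmax would do the same):

raw = oinf.run({'X': X.astype(numpy.float32)[:5]})['probabilities']
exp = numpy.exp(raw - raw.max(axis=1, keepdims=True))
print(exp / exp.sum(axis=1, keepdims=True))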

+
+
+

Option decision_path#

+

scikit-learn implements a function to retrieve the decision path. It can be enabled with option decision_path.

+
clrrf = RandomForestClassifier(n_estimators=2, max_depth=2)
+clrrf.fit(X_train, y_train)
+clrrf.predict(X_test[:2])
+paths, n_nodes_ptr = clrrf.decision_path(X_test[:2])
+print(paths.todense())
+
+model_def = to_onnx(clrrf, X_train.astype(numpy.float32),
+                    options={id(clrrf): {'decision_path': True,
+                                         'zipmap': False}})
+sess = InferenceSession(model_def.SerializeToString())
+
+
+
[[1 0 1 0 1 1 0 1 1 0]
+ [1 0 1 0 1 1 0 1 1 0]]
+
+
+

The model produces 3 outputs.

+
print([o.name for o in sess.get_outputs()])
+
+
+
['label', 'probabilities', 'decision_path']
+
+
+

Let’s display the last one.

+
res = sess.run(None, {'X': X_test[:2].astype(numpy.float32)})
+print(res[-1])
+
+
+
[['10101' '10110']
+ ['10101' '10110']]
+
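Each string encodes, for one sample and one tree, which nodes were visited (one character per node). As long as both trees have the same number of nodes, as is the case here, it can be turned back into a 0/1 array (sketch):

paths_onnx = numpy.array([[[int(c) for c in cell] for cell in row]
                          for row in res[-1]])
print(paths_onnx.shape)  # (n_samples, n_trees, n_nodes), here (2, 2, 5)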
+
+
+
+

List of available options#

+

Options are registered for every converter, which makes it possible to detect unsupported options while running the conversion.

+
all_opts = set()
+for k, v in sorted(_converter_pool.items()):
+    opts = v.get_allowed_options()
+    if not isinstance(opts, dict):
+        continue
+    name = k.replace('Sklearn', '')
+    print('%s%s %r' % (name, " " * (30 - len(name)), opts))
+    for o in opts:
+        all_opts.add(o)
+
+print('all options:', pformat(list(sorted(all_opts))))
+
+
+
LgbmClassifier                 {'zipmap': [True, False], 'nocl': [True, False]}
+LightGbmBooster                {'cast': [True, False]}
+LightGbmLGBMClassifier         {'nocl': [True, False], 'zipmap': [True, False, 'columns']}
+LightGbmLGBMRegressor          {'split': None}
+Skl2onnxTraceableCountVectorizer {'tokenexp': None, 'separators': None, 'nan': [True, False], 'keep_empty_string': [True, False]}
+Skl2onnxTraceableTfidfVectorizer {'tokenexp': None, 'separators': None, 'nan': [True, False], 'keep_empty_string': [True, False]}
+AdaBoostClassifier             {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
+BaggingClassifier              {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
+BayesianGaussianMixture        {'score_samples': [True, False]}
+BayesianRidge                  {'return_std': [True, False]}
+BernoulliNB                    {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
+CalibratedClassifierCV         {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
+CategoricalNB                  {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
+ComplementNB                   {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
+CountVectorizer                {'tokenexp': None, 'separators': None, 'nan': [True, False], 'keep_empty_string': [True, False]}
+DecisionTreeClassifier         {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'decision_path': [True, False], 'decision_leaf': [True, False]}
+DecisionTreeRegressor          {'decision_path': [True, False], 'decision_leaf': [True, False]}
+ExtraTreeClassifier            {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'decision_path': [True, False], 'decision_leaf': [True, False]}
+ExtraTreeRegressor             {'decision_path': [True, False], 'decision_leaf': [True, False]}
+ExtraTreesClassifier           {'zipmap': [True, False, 'columns'], 'raw_scores': [True, False], 'nocl': [True, False], 'output_class_labels': [False, True], 'decision_path': [True, False], 'decision_leaf': [True, False]}
+ExtraTreesRegressor            {'decision_path': [True, False], 'decision_leaf': [True, False]}
+GaussianMixture                {'score_samples': [True, False]}
+GaussianNB                     {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
+GaussianProcessClassifier      {'optim': [None, 'cdist'], 'nocl': [False, True], 'output_class_labels': [False, True], 'zipmap': [False, True]}
+GaussianProcessRegressor       {'return_cov': [False, True], 'return_std': [False, True], 'optim': [None, 'cdist']}
+GradientBoostingClassifier     {'zipmap': [True, False, 'columns'], 'raw_scores': [True, False], 'output_class_labels': [False, True], 'nocl': [True, False]}
+HistGradientBoostingClassifier {'zipmap': [True, False, 'columns'], 'raw_scores': [True, False], 'output_class_labels': [False, True], 'nocl': [True, False]}
+HistGradientBoostingRegressor  {'zipmap': [True, False, 'columns'], 'raw_scores': [True, False], 'output_class_labels': [False, True], 'nocl': [True, False]}
+IsolationForest                {'score_samples': [True, False]}
+KMeans                         {'gemm': [True, False]}
+KNNImputer                     {'optim': [None, 'cdist']}
+KNeighborsClassifier           {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'raw_scores': [True, False], 'output_class_labels': [False, True], 'optim': [None, 'cdist']}
+KNeighborsRegressor            {'optim': [None, 'cdist']}
+KNeighborsTransformer          {'optim': [None, 'cdist']}
+KernelPCA                      {'optim': [None, 'cdist']}
+LinearClassifier               {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
+LinearSVC                      {'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
+LocalOutlierFactor             {'score_samples': [True, False], 'optim': [None, 'cdist']}
+MLPClassifier                  {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
+MaxAbsScaler                   {'div': ['std', 'div', 'div_cast']}
+MiniBatchKMeans                {'gemm': [True, False]}
+MultiOutputClassifier          {'nocl': [False, True], 'output_class_labels': [False, True], 'zipmap': [False, True]}
+MultinomialNB                  {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
+NearestNeighbors               {'optim': [None, 'cdist']}
+OneVsOneClassifier             {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True]}
+OneVsRestClassifier            {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
+QuadraticDiscriminantAnalysis  {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True]}
+RadiusNeighborsClassifier      {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'raw_scores': [True, False], 'output_class_labels': [False, True], 'optim': [None, 'cdist']}
+RadiusNeighborsRegressor       {'optim': [None, 'cdist']}
+RandomForestClassifier         {'zipmap': [True, False, 'columns'], 'raw_scores': [True, False], 'nocl': [True, False], 'output_class_labels': [False, True], 'decision_path': [True, False], 'decision_leaf': [True, False]}
+RandomForestRegressor          {'decision_path': [True, False], 'decision_leaf': [True, False]}
+RobustScaler                   {'div': ['std', 'div', 'div_cast']}
+SGDClassifier                  {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
+SVC                            {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
+Scaler                         {'div': ['std', 'div', 'div_cast']}
+StackingClassifier             {'zipmap': [True, False, 'columns'], 'nocl': [True, False], 'output_class_labels': [False, True], 'raw_scores': [True, False]}
+TfidfTransformer               {'nan': [True, False]}
+TfidfVectorizer                {'tokenexp': None, 'separators': None, 'nan': [True, False], 'keep_empty_string': [True, False]}
+VotingClassifier               {'zipmap': [True, False, 'columns'], 'output_class_labels': [False, True], 'nocl': [True, False]}
+WrappedLightGbmBoosterClassifier {'zipmap': [True, False], 'nocl': [True, False]}
+XGBoostXGBClassifier           {'zipmap': [True, False], 'raw_scores': [True, False], 'nocl': [True, False]}
+fct_score_cdist_sum            {'cdist': [None, 'single-node']}
+all options: ['cast',
+ 'cdist',
+ 'decision_leaf',
+ 'decision_path',
+ 'div',
+ 'gemm',
+ 'keep_empty_string',
+ 'nan',
+ 'nocl',
+ 'optim',
+ 'output_class_labels',
+ 'raw_scores',
+ 'return_cov',
+ 'return_std',
+ 'score_samples',
+ 'separators',
+ 'split',
+ 'tokenexp',
+ 'zipmap']
+
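The registry can also be queried for a single model; the key is the scikit-learn class name prefixed with Sklearn, as stripped in the loop above:

conv = _converter_pool['SklearnRandomForestClassifier']
print(conv.get_allowed_options())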
+
+

Total running time of the script: ( 0 minutes 1.322 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+ +
+ +
+
\ No newline at end of file
diff --git a/auto_tutorial/plot_dbegin_options_list.html b/auto_tutorial/plot_dbegin_options_list.html
index ea8abcdb1..ed3811afd 100644
--- a/auto_tutorial/plot_dbegin_options_list.html
+++ b/auto_tutorial/plot_dbegin_options_list.html
@@ -1,651 +1,506 @@
-Black list operators when converting — sklearn-onnx 1.11.2 documentation

Black list operators when converting#

-

Some runtimes do not implement every available operator in ONNX. The converter does not know that, but it is possible to blacklist some operators. Most converters do not change their behaviour: they fail if they would use a blacklisted operator. A couple of them produce a different ONNX graph instead.

- -
-

GaussianMixture#

-

The first converter to change its behaviour depending on a blacklist of operators is the one for model GaussianMixture.

-
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
-from mlprodict.onnxrt import OnnxInference
-from timeit import timeit
-import numpy
-from onnxruntime import InferenceSession
-from sklearn.mixture import GaussianMixture
-from sklearn.datasets import load_iris
-from sklearn.model_selection import train_test_split
-from skl2onnx import to_onnx
-
-data = load_iris()
-X_train, X_test = train_test_split(data.data)
-model = GaussianMixture()
-model.fit(X_train)
-
-
-
-
GaussianMixture()
-
-
-
-
-

Default conversion#

-
model_onnx = to_onnx(
-    model, X_train[:1].astype(numpy.float32),
-    options={id(model): {'score_samples': True}},
-    target_opset=12)
-sess = InferenceSession(model_onnx.SerializeToString())
-
-xt = X_test[:5].astype(numpy.float32)
-print(model.score_samples(xt))
-print(sess.run(None, {'X': xt})[2])
-
-
-

Out:

-
[-1.11113176 -2.15611455 -2.44624895 -2.01165533 -3.18516454]
-[[-1.1111317]
- [-2.156114 ]
- [-2.446249 ]
- [-2.0116558]
- [-3.1851645]]
-
-
-

Display the ONNX graph.

-
oinf = OnnxInference(model_onnx)
-ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot dbegin options list
-
-

Conversion without ReduceLogSumExp#

-

Parameter black_op is used to tell the converter not to use this operator. Let's see what the converter produces in that case.

-
model_onnx2 = to_onnx(
-    model, X_train[:1].astype(numpy.float32),
-    options={id(model): {'score_samples': True}},
-    black_op={'ReduceLogSumExp'},
-    target_opset=12)
-sess2 = InferenceSession(model_onnx2.SerializeToString())
-
-xt = X_test[:5].astype(numpy.float32)
-print(model.score_samples(xt))
-print(sess2.run(None, {'X': xt})[2])
-
-
-

Out:

-
[-1.11113176 -2.15611455 -2.44624895 -2.01165533 -3.18516454]
-[[-1.1111317]
- [-2.156114 ]
- [-2.446249 ]
- [-2.0116558]
- [-3.1851645]]
-
-
-

Display the ONNX graph.

-
oinf = OnnxInference(model_onnx2)
-ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot dbegin options list
-
-

Processing time#

-
print(timeit(stmt="sess.run(None, {'X': xt})",
-             number=10000, globals={'sess': sess, 'xt': xt}))
-
-print(timeit(stmt="sess2.run(None, {'X': xt})",
-             number=10000, globals={'sess2': sess2, 'xt': xt}))
-
-
-

Out:

-
0.3173113000000001
-0.3591471000000013
-
-
-

The model using ReduceLogSumExp is much faster.

-
-
-

If the converter cannot convert without…#

-

Many converters do not consider the white and black lists of operators. If a converter cannot convert without using a blacklisted operator (or using only whitelisted operators), skl2onnx raises an error.

-
try:
-    to_onnx(
-        model, X_train[:1].astype(numpy.float32),
-        options={id(model): {'score_samples': True}},
-        black_op={'ReduceLogSumExp', 'Add'},
-        target_opset=12)
-except RuntimeError as e:
-    print('Error:', e)
-
-
-

Out:

-
Error: Operator 'Add' is black listed.
-
-
-

Total running time of the script: ( 0 minutes 1.509 seconds)

- -

Gallery generated by Sphinx-Gallery


Black list operators when converting#

+

Some runtimes do not implement every available operator in ONNX. The converter does not know that, but it is possible to blacklist some operators. Most converters do not change their behaviour: they fail if they need a blacklisted operator, while a couple of them produce a different ONNX graph.

+
+

GaussianMixture#

+

The first converter whose behaviour changes depending on a black list of operators is the one for GaussianMixture.

+
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+from mlprodict.onnxrt import OnnxInference
+from timeit import timeit
+import numpy
+from onnxruntime import InferenceSession
+from sklearn.mixture import GaussianMixture
+from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
+from skl2onnx import to_onnx
+
+data = load_iris()
+X_train, X_test = train_test_split(data.data)
+model = GaussianMixture()
+model.fit(X_train)
+
+
+
+
GaussianMixture()
+
+
+
+
+

Default conversion#

+
model_onnx = to_onnx(
+    model, X_train[:1].astype(numpy.float32),
+    options={id(model): {'score_samples': True}},
+    target_opset=12)
+sess = InferenceSession(model_onnx.SerializeToString())
+
+xt = X_test[:5].astype(numpy.float32)
+print(model.score_samples(xt))
+print(sess.run(None, {'X': xt})[2])
+
+
+
[-1.20177945 -3.55755314 -1.50589005 -5.95852584 -2.41493749]
+[[-1.2017796]
+ [-3.5575526]
+ [-1.5058892]
+ [-5.9585238]
+ [-2.4149387]]
+
+
+

Display the ONNX graph.

+
oinf = OnnxInference(model_onnx)
+ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot dbegin options list
+
+

Conversion without ReduceLogSumExp#

+

Parameter black_op is used to tell the converter +not to use this operator. Let’s see what the converter +produces in that case.

+
model_onnx2 = to_onnx(
+    model, X_train[:1].astype(numpy.float32),
+    options={id(model): {'score_samples': True}},
+    black_op={'ReduceLogSumExp'},
+    target_opset=12)
+sess2 = InferenceSession(model_onnx2.SerializeToString())
+
+xt = X_test[:5].astype(numpy.float32)
+print(model.score_samples(xt))
+print(sess2.run(None, {'X': xt})[2])
+
+
+
[-1.20177945 -3.55755314 -1.50589005 -5.95852584 -2.41493749]
+[[-1.2017796]
+ [-3.5575526]
+ [-1.5058892]
+ [-5.958523 ]
+ [-2.4149382]]
+
+
+

Display the ONNX graph.

+
oinf = OnnxInference(model_onnx2)
+ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot dbegin options list
+
+
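One quick way to check that the blacklisted operator was indeed avoided is to compare the operator types present in both graphs. This is a small sketch (not part of the original example) using the standard ONNX protobuf API; it assumes model_onnx and model_onnx2 from the cells above:

# Collect the operator types used by each converted model;
# 'ReduceLogSumExp' should only appear in the default conversion.
ops_default = {node.op_type for node in model_onnx.graph.node}
ops_black = {node.op_type for node in model_onnx2.graph.node}
print('only in the default model:', ops_default - ops_black)
print('only in the black_op model:', ops_black - ops_default)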

Processing time#

+
print(timeit(stmt="sess.run(None, {'X': xt})",
+             number=10000, globals={'sess': sess, 'xt': xt}))
+
+print(timeit(stmt="sess2.run(None, {'X': xt})",
+             number=10000, globals={'sess2': sess2, 'xt': xt}))
+
+
+
0.23519209400001273
+0.24321569299991097
+
+
+

The model using ReduceLogSumExp is slightly faster.

+
+
+

If the converter cannot convert without…#

+

Many converters do not consider the white and black lists +of operators. If a converter fails to convert without using +a blacklisted operator (or only whitelisted operators), +skl2onnx raises an error.

+
try:
+    to_onnx(
+        model, X_train[:1].astype(numpy.float32),
+        options={id(model): {'score_samples': True}},
+        black_op={'ReduceLogSumExp', 'Add'},
+        target_opset=12)
+except RuntimeError as e:
+    print('Error:', e)
+
+
+
Error: Operator 'Add' is black listed.
+
+
+

Total running time of the script: ( 0 minutes 1.110 seconds)

+ +

Gallery generated by Sphinx-Gallery

\ No newline at end of file
diff --git a/auto_tutorial/plot_dbegin_options_zipmap.html b/auto_tutorial/plot_dbegin_options_zipmap.html
index 0454ddfa4..4be484dff 100644
--- a/auto_tutorial/plot_dbegin_options_zipmap.html
+++ b/auto_tutorial/plot_dbegin_options_zipmap.html
@@ -1,452 +1,288 @@

Choose appropriate output of a classifier#

@@ -455,51 +291,39 @@
into a list of dictionaries where each probability is mapped to its class id or name. That mechanism retains the class names but is slower. Let’s see what other options are available.
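As a small, hypothetical illustration (not part of the original example) of what that output looks like and how it can be turned back into a dense array:

import pandas

# A ZipMap output is a list of dictionaries, one per observation,
# mapping each class id to its probability.
zipmap_output = [{10: 0.7, 12: 0.2, 14: 0.1}, {10: 0.1, 12: 0.6, 14: 0.3}]
proba = pandas.DataFrame(zipmap_output).values
print(proba.shape)  # (2, 3)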

-
-

Train a model and convert it#

+

Train a model and convert it#

from timeit import repeat
 import numpy
 import sklearn
-from sklearn.datasets import load_iris
-from sklearn.model_selection import train_test_split
+from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
 import onnxruntime as rt
 import onnx
 import skl2onnx
 from skl2onnx.common.data_types import FloatTensorType
 from skl2onnx import to_onnx
-from sklearn.linear_model import LogisticRegression
-from sklearn.multioutput import MultiOutputClassifier
+from sklearn.linear_model import LogisticRegression
+from sklearn.multioutput import MultiOutputClassifier
 
-iris = load_iris()
+iris = load_iris()
 X, y = iris.data, iris.target
 X = X.astype(numpy.float32)
 y = y * 2 + 10  # to get labels different from [0, 1, 2]
-X_train, X_test, y_train, y_test = train_test_split(X, y)
-clr = LogisticRegression(max_iter=500)
-clr.fit(X_train, y_train)
-print(clr)
+X_train, X_test, y_train, y_test = train_test_split(X, y)
+clr = LogisticRegression(max_iter=500)
+clr.fit(X_train, y_train)
+print(clr)
 
-onx = to_onnx(clr, X_train, target_opset=12)
+onx = to_onnx(clr, X_train, target_opset=12)
 
-

Out:

LogisticRegression(max_iter=500)
 
-

Default behaviour: zipmap=True#

+

Default behaviour: zipmap=True#

The output type for the probabilities is a list of dictionaries.

-

Out:

-
[{10: 0.9601631164550781, 12: 0.039836231619119644, 14: 7.069097023304494e-07}, {10: 0.9455938339233398, 12: 0.054405875504016876, 14: 2.9202584528320585e-07}]
+
[{10: 0.02909434586763382, 12: 0.9069037437438965, 14: 0.06400192528963089}, {10: 0.001469946000725031, 12: 0.6960583329200745, 14: 0.30247175693511963}]
 probabilities type: <class 'list'>
 type for the first observations: <class 'dict'>
 
-

Option zipmap=False#

+

Option zipmap=False#

Probabilities are now a matrix.

initial_type = [('float_input', FloatTensorType([None, 4]))]
-options = {id(clr): {'zipmap': False}}
-onx2 = to_onnx(clr, X_train, options=options, target_opset=12)
+options = {id(clr): {'zipmap': False}}
+onx2 = to_onnx(clr, X_train, options=options, target_opset=12)
 
 sess2 = rt.InferenceSession(onx2.SerializeToString())
 res2 = sess2.run(None, {'X': X_test})
@@ -530,21 +353,20 @@ 

Option zipmap=False
print("type for the first observations:", type(res2[1][0]))

-

Out:

-
[[9.6016312e-01 3.9836232e-02 7.0690970e-07]
- [9.4559383e-01 5.4405876e-02 2.9202585e-07]]
+
[[0.02909435 0.90690374 0.06400193]
+ [0.00146995 0.69605833 0.30247176]]
 probabilities type: <class 'numpy.ndarray'>
 type for the first observations: <class 'numpy.ndarray'>
 
-

Option zipmap=’columns’#

+

Option zipmap=’columns’#

This option removes the final operator ZipMap and splits the probabilities into columns. The final model produces one output for the label, and one output per class.

-
options = {id(clr): {'zipmap': 'columns'}}
-onx3 = to_onnx(clr, X_train, options=options, target_opset=12)
+
options = {id(clr): {'zipmap': 'columns'}}
+onx3 = to_onnx(clr, X_train, options=options, target_opset=12)
 
 sess3 = rt.InferenceSession(onx3.SerializeToString())
 res3 = sess3.run(None, {'X': X_test})
@@ -553,16 +375,15 @@ 

Option zipmap=’columns’
out.name, res3[i].shape, res3[i][:2]))

-

Out:

-
output: 'output_label' shape=(38,) values=[10 10]...
-output: 'i10' shape=(38,) values=[0.9601631  0.94559383]...
-output: 'i12' shape=(38,) values=[0.03983623 0.05440588]...
-output: 'i14' shape=(38,) values=[7.0690970e-07 2.9202585e-07]...
+
output: 'output_label' shape=(38,) values=[12 12]...
+output: 'i10' shape=(38,) values=[0.02909435 0.00146995]...
+output: 'i12' shape=(38,) values=[0.90690374 0.69605833]...
+output: 'i14' shape=(38,) values=[0.06400193 0.30247176]...
 
-

Let’s compare prediction time#

+

Let’s compare prediction time#

print("Average time with ZipMap:")
 print(sum(repeat(lambda: sess.run(None, {'X': X_test}),
                  number=100, repeat=10)) / 10)
@@ -583,25 +404,24 @@ 

Let’s compare prediction time#
many times the same information with onnxruntime.

-

Out:

Average time with ZipMap:
-0.005073529999998527
+0.003929830000015499
 Average time without ZipMap:
-0.0016709999999960702
+0.0014273400999172737
 Average time without ZipMap but with columns:
-0.0030088799999930414
+0.002505180000025575
 
-

Option zipmap=False and output_class_labels=True#

+

Option zipmap=False and output_class_labels=True#

Option zipmap=False seems a better choice because it is much faster but labels are lost in the process. Option output_class_labels can be used to expose the labels as a third output.

initial_type = [('float_input', FloatTensorType([None, 4]))]
-options = {id(clr): {'zipmap': False, 'output_class_labels': True}}
-onx4 = to_onnx(clr, X_train, options=options, target_opset=12)
+options = {id(clr): {'zipmap': False, 'output_class_labels': True}}
+onx4 = to_onnx(clr, X_train, options=options, target_opset=12)
 
 sess4 = rt.InferenceSession(onx4.SerializeToString())
 res4 = sess4.run(None, {'X': X_test})
@@ -610,9 +430,8 @@ 

Option zipmap=False and output_class_labels=True
print("class labels:", res4[2])

-

Out:

-
[[9.6016312e-01 3.9836232e-02 7.0690970e-07]
- [9.4559383e-01 5.4405876e-02 2.9202585e-07]]
+
[[0.02909435 0.90690374 0.06400193]
+ [0.00146995 0.69605833 0.30247176]]
 probabilities type: <class 'numpy.ndarray'>
 class labels: [10 12 14]
 
@@ -623,14 +442,13 @@

Option zipmap=False and output_class_labels=True
number=100, repeat=10)) / 10)

-

Out:

Average time without ZipMap but with output_class_labels:
-0.0029111999999997806
+0.002399530100001357
 
-

MultiOutputClassifier#

+

MultiOutputClassifier#

This model is equivalent to several classifiers, one for every label to predict. Instead of returning a matrix of probabilities, it returns a sequence of matrices. Let’s first modify the labels to get
@@ -640,7 +458,6 @@

MultiOutputClassifier
print(y[:5])

-

Out:

[[  10 1000]
  [  10  110]
  [  10  110]
@@ -649,35 +466,34 @@ 

MultiOutputClassifier
X_train, X_test, y_train, y_test = train_test_split(X, y)
-clr = MultiOutputClassifier(LogisticRegression(max_iter=500))
-clr.fit(X_train, y_train)
-print(clr)
+
X_train, X_test, y_train, y_test = train_test_split(X, y)
+clr = MultiOutputClassifier(LogisticRegression(max_iter=500))
+clr.fit(X_train, y_train)
+print(clr)
 
-onx5 = to_onnx(clr, X_train, target_opset=12)
+onx5 = to_onnx(clr, X_train, target_opset=12)
 
 sess5 = rt.InferenceSession(onx5.SerializeToString())
 res5 = sess5.run(None, {'X': X_test[:3]})
 print(res5)
 
-

Out:

MultiOutputClassifier(estimator=LogisticRegression(max_iter=500))
-D:\github\onnx\sklearn-onnx\skl2onnx\_parse.py:528: UserWarning: Option zipmap is ignored for model <class 'sklearn.multioutput.MultiOutputClassifier'>. Set option zipmap to False to remove this message.
+/home/xadupre/github/sklearn-onnx/skl2onnx/_parse.py:529: UserWarning: Option zipmap is ignored for model <class 'sklearn.multioutput.MultiOutputClassifier'>. Set option zipmap to False to remove this message.
   warnings.warn(
-[array([[ 12, 112],
-       [ 14, 114],
-       [ 14, 114]], dtype=int64), [array([[1.7433858e-02, 9.2380190e-01, 5.8764238e-02],
-       [8.0360842e-05, 1.3341965e-01, 8.6649996e-01],
-       [9.2398236e-04, 3.4383032e-01, 6.5524566e-01]], dtype=float32), array([[1.44697949e-02, 5.88752866e-01, 1.22261584e-01, 2.74515748e-01],
-       [4.41888697e-04, 2.33344465e-01, 6.36498570e-01, 1.29715100e-01],
-       [2.21832423e-03, 2.89401174e-01, 4.72904593e-01, 2.35475823e-01]],
+[array([[ 10, 110],
+       [ 10, 110],
+       [ 10, 110]], dtype=int64), [array([[9.5487159e-01, 4.5127936e-02, 4.7449998e-07],
+       [9.7273737e-01, 2.7262522e-02, 1.5831836e-07],
+       [9.6799171e-01, 3.2008070e-02, 2.2276080e-07]], dtype=float32), array([[7.2772932e-01, 8.8738047e-02, 1.0638156e-04, 1.8342626e-01],
+       [8.3462042e-01, 6.5695420e-02, 6.4909793e-05, 9.9619307e-02],
+       [7.9595613e-01, 6.4299725e-02, 6.8941692e-05, 1.3967523e-01]],
       dtype=float32)]]
 

Option zipmap is ignored. Labels are missing but they can be added back as a third output.

-
onx6 = to_onnx(clr, X_train, target_opset=12,
+
onx6 = to_onnx(clr, X_train, target_opset=12,
                options={'zipmap': False, 'output_class_labels': True})
 
 sess6 = rt.InferenceSession(onx6.SerializeToString())
@@ -687,15 +503,14 @@ 

MultiOutputClassifierprint("class labels", res6[2])

-

Out:

-
predicted labels [[ 12 112]
- [ 14 114]
- [ 14 114]]
-predicted probabilies [array([[1.7433858e-02, 9.2380190e-01, 5.8764238e-02],
-       [8.0360842e-05, 1.3341965e-01, 8.6649996e-01],
-       [9.2398236e-04, 3.4383032e-01, 6.5524566e-01]], dtype=float32), array([[1.44697949e-02, 5.88752866e-01, 1.22261584e-01, 2.74515748e-01],
-       [4.41888697e-04, 2.33344465e-01, 6.36498570e-01, 1.29715100e-01],
-       [2.21832423e-03, 2.89401174e-01, 4.72904593e-01, 2.35475823e-01]],
+
predicted labels [[ 10 110]
+ [ 10 110]
+ [ 10 110]]
+predicted probabilies [array([[9.5487159e-01, 4.5127936e-02, 4.7449998e-07],
+       [9.7273737e-01, 2.7262522e-02, 1.5831836e-07],
+       [9.6799171e-01, 3.2008070e-02, 2.2276080e-07]], dtype=float32), array([[7.2772932e-01, 8.8738047e-02, 1.0638156e-04, 1.8342626e-01],
+       [8.3462042e-01, 6.5695420e-02, 6.4909793e-05, 9.9619307e-02],
+       [7.9595613e-01, 6.4299725e-02, 6.8941692e-05, 1.3967523e-01]],
       dtype=float32)]
 class labels [array([10, 12, 14], dtype=int64), array([ 110,  112,  114, 1000], dtype=int64)]
 
@@ -708,19 +523,15 @@

MultiOutputClassifierprint("skl2onnx: ", skl2onnx.__version__)

-

Out:

-
numpy: 1.21.3
-scikit-learn: 1.1.1
-onnx:  1.12.0
-onnxruntime:  1.10.0
-skl2onnx:  1.11.2
+
numpy: 1.23.5
+scikit-learn: 1.3.dev0
+onnx:  1.14.0
+onnxruntime:  1.15.0+cpu
+skl2onnx:  1.14.0
 
-

Total running time of the script: ( 0 minutes 0.382 seconds)

\ No newline at end of file
diff --git a/auto_tutorial/plot_ebegin_float_double.html b/auto_tutorial/plot_ebegin_float_double.html
index 55ca02008..8a44c23cd 100644
--- a/auto_tutorial/plot_ebegin_float_double.html
+++ b/auto_tutorial/plot_ebegin_float_double.html
@@ -1,892 +1,657 @@

Issues when switching to float#

-

Most models in scikit-learn do computation with double, not float. Most models in deep learning use float because that’s the most common situation with GPU. ONNX was initially created to facilitate the deployment of deep learning models and that explains why many converters assume the converted models should use float. That assumption does not usually harm the predictions: the conversion to float introduces small discrepancies compared to double predictions. That assumption usually holds if the prediction function is continuous: if y = f(x), then dy = f'(x) dx. We can determine an upper bound on the discrepancies: \Delta(y) \leqslant \sup_x \left\Vert f'(x)\right\Vert dx. dx is the discrepancy introduced by a float conversion, dx = x - numpy.float32(x).

-

However, that’s not the case for every model. A decision tree trained for a regression is not a continuous function. Therefore, even a small dx may introduce a huge discrepancy. Let’s look into an example which always produces discrepancies and some ways to overcome this situation.

- -
-

More into the issue#

-

The example below is built to fail. It contains integer features with different orders of magnitude, rounded to integers. A decision tree compares features to thresholds. In most cases, float and double comparisons give the same result. We denote [x]_{f32} the conversion (or cast) numpy.float32(x).

-
-

x \leqslant y = [x]_{f32} \leqslant [y]_{f32}

-

However, the probability that both comparisons give different results is not zero. The following graph shows the discord areas.

-
from mlprodict.sklapi import OnnxPipeline
-from skl2onnx.sklapi import CastTransformer, CastRegressor
-from skl2onnx import to_onnx
-from mlprodict.onnx_conv import to_onnx as to_onnx_extended
-from mlprodict.onnxrt import OnnxInference
-from onnxruntime import InferenceSession
-from sklearn.model_selection import train_test_split
-from sklearn.tree import DecisionTreeRegressor
-from sklearn.preprocessing import StandardScaler
-from sklearn.pipeline import Pipeline
-from sklearn.datasets import make_regression
-import numpy
-import matplotlib.pyplot as plt
-
-
-def area_mismatch_rule(N, delta, factor, rule=None):
-    if rule is None:
-        def rule(t): return numpy.float32(t)
-    xst = []
-    yst = []
-    xsf = []
-    ysf = []
-    for x in range(-N, N):
-        for y in range(-N, N):
-            dx = (1. + x * delta) * factor
-            dy = (1. + y * delta) * factor
-            c1 = 1 if numpy.float64(dx) <= numpy.float64(dy) else 0
-            c2 = 1 if numpy.float32(dx) <= rule(dy) else 0
-            key = abs(c1 - c2)
-            if key == 1:
-                xsf.append(dx)
-                ysf.append(dy)
-            else:
-                xst.append(dx)
-                yst.append(dy)
-    return xst, yst, xsf, ysf
-
-
-delta = 36e-10
-factor = 1
-xst, yst, xsf, ysf = area_mismatch_rule(100, delta, factor)
-
-
-fig, ax = plt.subplots(1, 1, figsize=(5, 5))
-ax.plot(xst, yst, '.', label="agree")
-ax.plot(xsf, ysf, '.', label="disagree")
-ax.set_title("Region where x <= y and (float)x <= (float)y agree")
-ax.set_xlabel("x")
-ax.set_ylabel("y")
-ax.plot([min(xst), max(xst)], [min(yst), max(yst)], 'k--')
-ax.legend()
-
-
-Region where x <= y and (float)x <= (float)y agree

Out:

-
<matplotlib.legend.Legend object at 0x000001A9E78D2F40>
-
-
-
-
-

The pipeline and the data#

-

We can now build an example where the learned decision tree does many comparisons in this discord area. This is done by rounding features to integers, a frequent case when dealing with categorical features.

-
X, y = make_regression(10000, 10)
-X_train, X_test, y_train, y_test = train_test_split(X, y)
-
-Xi_train, yi_train = X_train.copy(), y_train.copy()
-Xi_test, yi_test = X_test.copy(), y_test.copy()
-for i in range(X.shape[1]):
-    Xi_train[:, i] = (Xi_train[:, i] * 2 ** i).astype(numpy.int64)
-    Xi_test[:, i] = (Xi_test[:, i] * 2 ** i).astype(numpy.int64)
-
-max_depth = 10
-
-model = Pipeline([
-    ('scaler', StandardScaler()),
-    ('dt', DecisionTreeRegressor(max_depth=max_depth))
-])
-
-model.fit(Xi_train, yi_train)
-
-
-
-
Pipeline(steps=[('scaler', StandardScaler()),
-                ('dt', DecisionTreeRegressor(max_depth=10))])
-
-
-
-
-

The discrepancies#

-

Let’s reuse the function implemented in the -first example Comparison and -look into the conversion.

-
def diff(p1, p2):
-    p1 = p1.ravel()
-    p2 = p2.ravel()
-    d = numpy.abs(p2 - p1)
-    return d.max(), (d / numpy.abs(p1)).max()
-
-
-onx = to_onnx(model, Xi_train[:1].astype(numpy.float32))
-
-sess = InferenceSession(onx.SerializeToString())
-
-X32 = Xi_test.astype(numpy.float32)
-
-skl = model.predict(X32)
-ort = sess.run(None, {'X': X32})[0]
-
-print(diff(skl, ort))
-
-
-

Out:

-
(230.44514830504892, 5.202494497892177)
-
-
-

The discrepancies are significant. The ONNX model keeps float at every step.

-
-

In scikit-learn:

-
-
-
-

CastTransformer#

-

We could try to use double everywhere. Unfortunately, ONNX ML Operators only allows float coefficients for the operator TreeEnsembleRegressor. We may want to compromise by casting the output of the normalizer into float in the scikit-learn pipeline.

-
- -
-
Pipeline(steps=[('scaler', StandardScaler()), ('cast', CastTransformer()),
-                ('dt', DecisionTreeRegressor(max_depth=10))])
-
-
-

The discrepancies.

-
onx2 = to_onnx(model2, Xi_train[:1].astype(numpy.float32))
-
-sess2 = InferenceSession(onx2.SerializeToString())
-
-skl2 = model2.predict(X32)
-ort2 = sess2.run(None, {'X': X32})[0]
-
-print(diff(skl2, ort2))
-
-
-

Out:

-
(230.44514830504892, 5.202494497892176)
-
-
-

That still fails because the normalizer -in scikit-learn and in ONNX -use different types. The cast still happens and -the dx is still here. To remove it, we need to use -double in ONNX normalizer.

-
model3 = Pipeline([
-    ('cast64', CastTransformer(dtype=numpy.float64)),
-    ('scaler', StandardScaler()),
-    ('cast', CastTransformer()),
-    ('dt', DecisionTreeRegressor(max_depth=max_depth))
-])
-
-model3.fit(Xi_train, yi_train)
-onx3 = to_onnx(model3, Xi_train[:1].astype(numpy.float32),
-               options={StandardScaler: {'div': 'div_cast'}})
-
-sess3 = InferenceSession(onx3.SerializeToString())
-
-skl3 = model3.predict(X32)
-ort3 = sess3.run(None, {'X': X32})[0]
-
-print(diff(skl3, ort3))
-
-
-

Out:

-
(3.0013221589797467e-05, 5.45871186188065e-08)
-
-
-

It works. That also means that it is difficult to change -the computation type when a pipeline includes a discontinuous -function. It is better to keep the same types all along -before using a decision tree.

-
-
-

Sledgehammer#

-

The idea here is to always train the next step based -on ONNX outputs. That way, every step of the pipeline -is trained based on ONNX output.

-
    -
  • Trains the first step.

  • -
  • Converts the step into ONNX

  • -
  • Computes ONNX outputs.

  • -
  • Trains the second step on these outputs.

  • -
  • Converts the second step into ONNX.

  • -
  • Merges it with the first step.

  • -
  • Computes ONNX outputs of the merged two first steps.

  • -
  • -
-

It is implemented in -class OnnxPipeline.

-
model_onx = OnnxPipeline([
-    ('scaler', StandardScaler()),
-    ('dt', DecisionTreeRegressor(max_depth=max_depth))
-])
-
-model_onx.fit(Xi_train, yi_train)
-
-
-
-
OnnxPipeline(steps=[('scaler',
-                     OnnxTransformer(onnx_bytes=b'\x08\x08\x12\x08skl2onnx\x1a\x061.11.2"\x07ai.onnx(\x002\x00:\xf6\x01\n\xa6\x01\n\x01X\x12\x08variable\x1a\x06Scaler"\x06Scaler*=\n\x06offset=o\x12\x83\xbb=\x9f\x86\x02\xbd=\x7fE\x11\xbd=P\xd7\xed\xbd=\xff\xd7\xa6\xbe= Aq\xbe=!\xfa\xc8==$\x03\x93\xbf=\xbd\xe3\xb4?= \xf7\x1a?\xa0\x01\x06*<\n\x...x950\x84==\xd65\x00==\x03Q\x81<=\x86\x99\xfe;=\xd7\xbb};=\xa3\x8a\xfe:\xa0\x01\x06:\nai.onnx.ml\x12\x1emlprodict_ONNX(StandardScaler)Z\x11\n\x01X\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\nb\x18\n\x08variable\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\nB\x0e\n\nai.onnx.ml\x10\x01B\x04\n\x00\x10\x0f')),
-                    ('dt', DecisionTreeRegressor(max_depth=10))])
-
-
-

The conversion.

-
try:
-    onx4 = to_onnx(model_onx, Xi_train[:1].astype(numpy.float32))
-except ValueError as e:
-    print("Failing due to %r.\nYou need to update mlprodict." % e)
-    import sys
-    sys.exit(0)
-
-sess4 = InferenceSession(onx4.SerializeToString())
-
-skl4 = model_onx.predict(X32)
-ort4 = sess4.run(None, {'X': X32})[0]
-
-print(diff(skl4, ort4))
-
-
-

Out:

-
(3.0013221589797467e-05, 5.45871186188065e-08)
-
-
-

It works too, in a simpler way.

-
-
-

No discrepancies at all?#

-

Is it possible to get no error at all? There is one major obstacle: scikit-learn stores the predicted values in every leaf with double (_tree.pyx - _get_value_ndarray), while ONNX defines the predicted values as floats: TreeEnsembleRegressor. What can we do to solve it? What if we could extend the ONNX specifications to support double instead of float? We reuse what was developed in example Other way to convert and a custom ONNX node TreeEnsembleRegressorDouble.

-
tree = DecisionTreeRegressor(max_depth=max_depth)
-tree.fit(Xi_train, yi_train)
-
-model_onx = to_onnx_extended(tree, Xi_train[:1].astype(numpy.float64),
-                             rewrite_ops=True)
-
-oinf5 = OnnxInference(model_onx, runtime='python_compiled')
-print(oinf5)
-
-
-

Out:

-
OnnxInference(...)
-    def compiled_run(dict_inputs, yield_ops=None, context=None):
-        if yield_ops is not None:
-            raise NotImplementedError('yields_ops should be None.')
-        # inputs
-        X = dict_inputs['X']
-        (variable, ) = n0_treeensembleregressordouble(X)
-        return {
-            'variable': variable,
-        }
-
-
-

Let’s measure the discrepancies.

-
X64 = Xi_test.astype(numpy.float64)
-skl5 = tree.predict(X64)
-ort5 = oinf5.run({'X': X64})['variable']
-
-
-

Perfect, no discrepancies at all.

-
print(diff(skl5, ort5))
-
-
-

Out:

-
(0.0, 0.0)
-
-
-
-
-

CastRegressor#

-

The previous example demonstrated that the type difference for the predicted values explains the small differences between scikit-learn and onnxruntime. But it cannot be fixed with the current ONNX specification. Another option is to cast the predictions into floats in the scikit-learn pipeline.

-
ctree = CastRegressor(DecisionTreeRegressor(max_depth=max_depth))
-ctree.fit(Xi_train, yi_train)
-
-onx6 = to_onnx(ctree, Xi_train[:1].astype(numpy.float32))
-
-sess6 = InferenceSession(onx6.SerializeToString())
-
-skl6 = ctree.predict(X32)
-ort6 = sess6.run(None, {'X': X32})[0]
-
-print(diff(skl6, ort6))
-
-
-

Out:

-
(0.0, 0.0)
-
-
-

Success!

-

Total running time of the script: ( 0 minutes 0.825 seconds)

- -

Gallery generated by Sphinx-Gallery


Issues when switching to float#

+

Most models in scikit-learn do computation with double, not float. Most models in deep learning use float because that’s the most common situation with GPU. ONNX was initially created to facilitate the deployment of deep learning models and that explains why many converters assume the converted models should use float. That assumption does not usually harm the predictions: the conversion to float introduces small discrepancies compared to double predictions. That assumption usually holds if the prediction function is continuous: if y = f(x), then dy = f'(x) dx. We can determine an upper bound on the discrepancies: \Delta(y) \leqslant \sup_x \left\Vert f'(x)\right\Vert dx. dx is the discrepancy introduced by a float conversion, dx = x - numpy.float32(x).
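As a small numeric illustration of the dx term (an addition to the original text, using only numpy):

import numpy

x = 0.1  # a double precision value
dx = x - numpy.float64(numpy.float32(x))
print(dx)  # about -1.5e-09, the rounding error introduced by the cast to float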

+

However, that’s not the case for every model. A decision tree trained for a regression is not a continuous function. Therefore, even a small dx may introduce a huge discrepancy. Let’s look into an example which always produces discrepancies and some ways to overcome this situation.

+
+

More into the issue#

+

The example below is built to fail. It contains integer features with different orders of magnitude, rounded to integers. A decision tree compares features to thresholds. In most cases, float and double comparisons give the same result. We denote [x]_{f32} the conversion (or cast) numpy.float32(x).

+
+
+

x \leqslant y = [x]_{f32} \leqslant [y]_{f32}

+
+

However, the probability that both comparisons give different results is not zero. The following graph shows the discord areas.

+
from mlprodict.sklapi import OnnxPipeline
+from skl2onnx.sklapi import CastTransformer
+from skl2onnx import to_onnx
+from onnxruntime import InferenceSession
+from sklearn.model_selection import train_test_split
+from sklearn.tree import DecisionTreeRegressor
+from sklearn.preprocessing import StandardScaler
+from sklearn.pipeline import Pipeline
+from sklearn.datasets import make_regression
+import numpy
+import matplotlib.pyplot as plt
+
+
+def area_mismatch_rule(N, delta, factor, rule=None):
+    if rule is None:
+        def rule(t): return numpy.float32(t)
+    xst = []
+    yst = []
+    xsf = []
+    ysf = []
+    for x in range(-N, N):
+        for y in range(-N, N):
+            dx = (1. + x * delta) * factor
+            dy = (1. + y * delta) * factor
+            c1 = 1 if numpy.float64(dx) <= numpy.float64(dy) else 0
+            c2 = 1 if numpy.float32(dx) <= rule(dy) else 0
+            key = abs(c1 - c2)
+            if key == 1:
+                xsf.append(dx)
+                ysf.append(dy)
+            else:
+                xst.append(dx)
+                yst.append(dy)
+    return xst, yst, xsf, ysf
+
+
+delta = 36e-10
+factor = 1
+xst, yst, xsf, ysf = area_mismatch_rule(100, delta, factor)
+
+
+fig, ax = plt.subplots(1, 1, figsize=(5, 5))
+ax.plot(xst, yst, '.', label="agree")
+ax.plot(xsf, ysf, '.', label="disagree")
+ax.set_title("Region where x <= y and (float)x <= (float)y agree")
+ax.set_xlabel("x")
+ax.set_ylabel("y")
+ax.plot([min(xst), max(xst)], [min(yst), max(yst)], 'k--')
+ax.legend()
+
+
+Region where x <= y and (float)x <= (float)y agree
<matplotlib.legend.Legend object at 0x7ff2658ec880>
+
+
+
+
+

The pipeline and the data#

+

We can now build an example where the learned decision tree does many comparisons in this discord area. This is done by rounding features to integers, a frequent case when dealing with categorical features.

+
X, y = make_regression(10000, 10)
+X_train, X_test, y_train, y_test = train_test_split(X, y)
+
+Xi_train, yi_train = X_train.copy(), y_train.copy()
+Xi_test, yi_test = X_test.copy(), y_test.copy()
+for i in range(X.shape[1]):
+    Xi_train[:, i] = (Xi_train[:, i] * 2 ** i).astype(numpy.int64)
+    Xi_test[:, i] = (Xi_test[:, i] * 2 ** i).astype(numpy.int64)
+
+max_depth = 10
+
+model = Pipeline([
+    ('scaler', StandardScaler()),
+    ('dt', DecisionTreeRegressor(max_depth=max_depth))
+])
+
+model.fit(Xi_train, yi_train)
+
+
+
+
Pipeline(steps=[('scaler', StandardScaler()),
+                ('dt', DecisionTreeRegressor(max_depth=10))])
+
+
+
+
+

The discrepancies#

+

Let’s reuse the function implemented in the +first example Comparison and +look into the conversion.

+
def diff(p1, p2):
+    p1 = p1.ravel()
+    p2 = p2.ravel()
+    d = numpy.abs(p2 - p1)
+    return d.max(), (d / numpy.abs(p1)).max()
+
+
+onx = to_onnx(model, Xi_train[:1].astype(numpy.float32),
+              target_opset=15)
+
+sess = InferenceSession(onx.SerializeToString())
+
+X32 = Xi_test.astype(numpy.float32)
+
+skl = model.predict(X32)
+ort = sess.run(None, {'X': X32})[0]
+
+print(diff(skl, ort))
+
+
+
(191.7381674075899, 3.336255213062701)
+
+
+

The discrepancies are significant. The ONNX model keeps float at every step.

+
+

In scikit-learn:
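In scikit-learn, every step of this pipeline works with double precision. A small, hypothetical sketch to verify it, reusing model and Xi_test defined above, could be:

# Print the dtype produced by each step of the scikit-learn pipeline:
# everything stays in double precision (float64).
data = Xi_test
for name, step in model.steps[:-1]:
    data = step.transform(data)
    print(name, data.dtype)
print('dt', model.steps[-1][1].predict(data).dtype)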

+
+
+
+

CastTransformer#

+

We could try to use double everywhere. Unfortunately, ONNX ML Operators only allows float coefficients for the operator TreeEnsembleRegressor. We may want to compromise by casting the output of the normalizer into float in the scikit-learn pipeline.

+
+
model2 = Pipeline([
+    ('scaler', StandardScaler()),
+    ('cast', CastTransformer()),
+    ('dt', DecisionTreeRegressor(max_depth=max_depth))
+])
+
+model2.fit(Xi_train, yi_train)
+
+
+
+
Pipeline(steps=[('scaler', StandardScaler()), ('cast', CastTransformer()),
+                ('dt', DecisionTreeRegressor(max_depth=10))])
+
+
+

The discrepancies.

+
onx2 = to_onnx(model2, Xi_train[:1].astype(numpy.float32),
+               target_opset=15)
+
+sess2 = InferenceSession(onx2.SerializeToString())
+
+skl2 = model2.predict(X32)
+ort2 = sess2.run(None, {'X': X32})[0]
+
+print(diff(skl2, ort2))
+
+
+
(191.7381674075899, 3.336255213062701)
+
+
+

That still fails because the normalizer +in scikit-learn and in ONNX +use different types. The cast still happens and +the dx is still here. To remove it, we need to use +double in ONNX normalizer.

+
model3 = Pipeline([
+    ('cast64', CastTransformer(dtype=numpy.float64)),
+    ('scaler', StandardScaler()),
+    ('cast', CastTransformer()),
+    ('dt', DecisionTreeRegressor(max_depth=max_depth))
+])
+
+model3.fit(Xi_train, yi_train)
+onx3 = to_onnx(model3, Xi_train[:1].astype(numpy.float32),
+               options={StandardScaler: {'div': 'div_cast'}},
+               target_opset=15)
+
+sess3 = InferenceSession(onx3.SerializeToString())
+
+skl3 = model3.predict(X32)
+ort3 = sess3.run(None, {'X': X32})[0]
+
+print(diff(skl3, ort3))
+
+
+
(1.884853452338575e-05, 5.703614863001166e-08)
+
+
+

It works. That also means that it is difficult to change +the computation type when a pipeline includes a discontinuous +function. It is better to keep the same types all along +before using a decision tree.

+
+
+

Sledgehammer#

+

The idea here is to always train the next step based +on ONNX outputs. That way, every step of the pipeline +is trained based on ONNX output.

+
    +
  • Trains the first step.

  • +
  • Converts the step into ONNX

  • +
  • Computes ONNX outputs.

  • +
  • Trains the second step on these outputs.

  • +
  • Converts the second step into ONNX.

  • +
  • Merges it with the first step.

  • +
  • Computes ONNX outputs of the merged two first steps.

  • +
  • +
+

It is implemented in +class OnnxPipeline.
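For illustration, a minimal by-hand sketch of the first items of that list could look like the following (hypothetical code, reusing the imports and the variables Xi_train, yi_train and max_depth defined earlier):

# Train the first step, convert it to ONNX, compute its ONNX outputs,
# then train the next step on those ONNX outputs instead of the
# scikit-learn outputs.
scaler = StandardScaler().fit(Xi_train)
onx_scaler = to_onnx(scaler, Xi_train[:1].astype(numpy.float32), target_opset=15)
sess_scaler = InferenceSession(onx_scaler.SerializeToString())
scaled = sess_scaler.run(None, {'X': Xi_train.astype(numpy.float32)})[0]
tree = DecisionTreeRegressor(max_depth=max_depth).fit(scaled, yi_train)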

+
model_onx = OnnxPipeline([
+    ('scaler', StandardScaler()),
+    ('dt', DecisionTreeRegressor(max_depth=max_depth))
+])
+
+model_onx.fit(Xi_train, yi_train)
+
+
+
+
OnnxPipeline(steps=[('scaler',
+                     OnnxTransformer(onnx_bytes=b'\x08\x08\x12\x08skl2onnx\x1a\x061.14.0"\x07ai.onnx(\x002\x00:\xf6\x01\n\xa6\x01\n\x01X\x12\x08variable\x1a\x06Scaler"\x06Scaler*=\n\x06offset=e\xcf\x8b\xb9=uLp<=\x7f\x8fg<=\x10\xe97\xbe=>\x9e\x03\xbe=\t@I?=b\xeb\xac\xbe=-!_\xc0=KY\x06\xbe=\xf0\xcc\x91\xbf\xa0\x01\x06*<\n\x05scale=xD\xb9?=\x...xa7\x91\x84==*\xf1\x03==s\x19\x80<=\x05\x92\xfe;=\x0e\x00\x81;=y4\x00;\xa0\x01\x06:\nai.onnx.ml\x12\x1emlprodict_ONNX(StandardScaler)Z\x11\n\x01X\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\nb\x18\n\x08variable\x12\x0c\n\n\x08\x01\x12\x06\n\x00\n\x02\x08\nB\x0e\n\nai.onnx.ml\x10\x01B\x04\n\x00\x10\x11')),
+                    ('dt', DecisionTreeRegressor(max_depth=10))])
+
+
+

By using opset 17 and opset 3 for domain ai.onnx.ml, the tree thresholds can be stored as double and not float anymore. That lowers the discrepancies even if the outputs are still float.

+
onx4 = to_onnx(model_onx, Xi_train[:1].astype(numpy.float32),
+               target_opset=17)
+
+sess4 = InferenceSession(onx4.SerializeToString())
+
+skl4 = model_onx.predict(X32)
+ort4 = sess4.run(None, {'X': X32})[0]
+
+print(diff(skl4, ort4))
+
+
+
(1.884853452338575e-05, 5.7036148848472986e-08)
+
+
+
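To check which opsets the converted model actually declares, one can look at its opset_import field. A small sketch using the onnx protobuf API and the onx4 model from the cell above:

# Each entry gives a domain ('' means the default ai.onnx domain) and its version.
for opset in onx4.opset_import:
    print(opset.domain or 'ai.onnx', opset.version)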

Total running time of the script: ( 0 minutes 0.838 seconds)

+ +

Gallery generated by Sphinx-Gallery

\ No newline at end of file
diff --git a/auto_tutorial/plot_fbegin_investigate.html b/auto_tutorial/plot_fbegin_investigate.html
index 73e1230cf..3209957cd 100644
--- a/auto_tutorial/plot_fbegin_investigate.html
+++ b/auto_tutorial/plot_fbegin_investigate.html
@@ -1,858 +1,728 @@

Intermediate results and investigation#

-

There are many reasons why a user may want more than just running the converted ONNX model. Intermediate results may be needed, that is the output of every node in the graph. The ONNX graph may need to be altered to remove some nodes. Transfer learning usually removes the last layers of a deep neural network. Another reason is debugging. It often happens that the runtime fails to compute the predictions due to a shape mismatch. Then it is useful to get the shape of every intermediate result. This example looks into two ways of doing it.

- -
-

Look into pipeline steps#

-

The first way is a tricky one: it overloads -methods transform, predict and predict_proba -to keep a copy of inputs and outputs. It then goes -through every step of the pipeline. If the pipeline -has n steps, it converts the pipeline with step 1, -then the pipeline with steps 1, 2, then 1, 2, 3…

-
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
-from mlprodict.onnxrt import OnnxInference
-import numpy
-from onnxruntime import InferenceSession
-from sklearn.pipeline import Pipeline
-from sklearn.preprocessing import StandardScaler
-from sklearn.cluster import KMeans
-from sklearn.datasets import load_iris
-from skl2onnx import to_onnx
-from skl2onnx.helpers import collect_intermediate_steps
-from skl2onnx.common.data_types import FloatTensorType
-
-
-

The pipeline.

-
data = load_iris()
-X = data.data
-
-pipe = Pipeline(steps=[
-    ('std', StandardScaler()),
-    ('km', KMeans(3))
-])
-pipe.fit(X)
-
-
-
-
Pipeline(steps=[('std', StandardScaler()), ('km', KMeans(n_clusters=3))])
-
-
-

The function goes through every step, -overloads the methods transform and -returns an ONNX graph for every step.

-
steps = collect_intermediate_steps(
-    pipe, "pipeline",
-    [("X", FloatTensorType([None, X.shape[1]]))])
-
-
-

We call method transform to populate the cache that the overloaded method transform keeps.

- -

Out:

-
array([[3.98940603, 0.21295824, 3.12119834],
-       [4.01793312, 0.99604549, 2.6755083 ],
-       [4.19343668, 0.65198444, 2.97416665],
-       [4.19784749, 0.9034561 , 2.88014429],
-       [4.11157152, 0.40215457, 3.30022609],
-       [3.89893116, 1.21154793, 3.50554424],
-       [4.21638048, 0.50244932, 3.14856384],
-       [3.97313411, 0.09132468, 2.99184826],
-       [4.40757189, 1.42174651, 2.92515933],
-       [4.05764261, 0.78993078, 2.79398956],
-       [3.92088109, 0.78999385, 3.32125333],
-       [4.07853631, 0.27618123, 3.0493632 ],
-       [4.16440431, 1.03497888, 2.80635045],
-       [4.63069748, 1.33482453, 3.21220972],
-       [4.14619343, 1.63865558, 3.88834965],
-       [4.49547518, 2.39898792, 4.4998303 ],
-       [4.02966144, 1.20748818, 3.60978017],
-       [3.91388548, 0.21618828, 3.05594182],
-       [3.72562039, 1.20986655, 3.34493953],
-       [4.10101938, 0.86706182, 3.50065397],
-       [3.66383713, 0.50401564, 2.80825681],
-       [3.94496718, 0.66826437, 3.27800809],
-       [4.51061335, 0.68658071, 3.58990876],
-       [3.57996434, 0.47945627, 2.55934697],
-       [3.98817445, 0.36345425, 2.96493153],
-       [3.88431906, 0.99023912, 2.55682739],
-       [3.79088782, 0.22683089, 2.8279719 ],
-       [3.89539875, 0.2947186 , 3.05970831],
-       [3.88085622, 0.25361098, 2.95425291],
-       [4.09851673, 0.65019824, 2.87745051],
-       [4.01796142, 0.80138328, 2.73238773],
-       [3.57350896, 0.52309257, 2.73361981],
-       [4.5037664 , 1.57658655, 4.11853014],
-       [4.4465301 , 1.87652483, 4.22845606],
-       [3.97906378, 0.76858489, 2.71452112],
-       [4.01986385, 0.54896332, 2.86508665],
-       [3.80064093, 0.63079314, 3.0573692 ],
-       [4.25136846, 0.45982568, 3.40284985],
-       [4.42052558, 1.2336976 , 3.00742655],
-       [3.90865188, 0.14580827, 2.95472117],
-       [4.01192633, 0.20261743, 3.12324651],
-       [4.64398605, 2.67055552, 2.90164193],
-       [4.42154566, 0.90927099, 3.15411688],
-       [3.70483773, 0.50081008, 2.8613548 ],
-       [3.9078554 , 0.92159916, 3.34606471],
-       [4.01421067, 1.01946042, 2.65231058],
-       [4.14238152, 0.86953764, 3.53206587],
-       [4.23577398, 0.72275914, 2.99813103],
-       [3.97409784, 0.72324305, 3.34116935],
-       [3.97223984, 0.30295342, 2.90222887],
-       [0.95288059, 3.43619989, 1.9003878 ],
-       [0.99352148, 2.97232682, 1.41851492],
-       [0.72661726, 3.51850037, 1.68457079],
-       [2.69898424, 3.33264308, 0.96940962],
-       [1.11074501, 3.35747592, 0.9112523 ],
-       [1.8143491 , 2.77550662, 0.35721918],
-       [1.00650285, 3.01808184, 1.59351202],
-       [3.31296552, 2.77360088, 1.50213315],
-       [1.14114175, 3.21148368, 1.11632078],
-       [2.42994048, 2.66294828, 0.77921299],
-       [3.73666782, 3.62389817, 1.97194958],
-       [1.45918639, 2.70011145, 0.77530513],
-       [2.74268279, 3.53658932, 1.25941769],
-       [1.28976474, 2.98813829, 0.66155141],
-       [2.05251547, 2.32311723, 0.73833453],
-       [0.98780965, 3.14311522, 1.46572707],
-       [1.67700171, 2.68234835, 0.80185102],
-       [2.12682734, 2.63954211, 0.568386  ],
-       [2.33743839, 3.97369206, 1.19987895],
-       [2.46667974, 2.87494798, 0.67881532],
-       [1.1880022 , 3.03853641, 1.34222961],
-       [1.63233668, 2.8022861 , 0.53061062],
-       [1.65142259, 3.68305664, 0.79234309],
-       [1.54593744, 2.96833851, 0.57371215],
-       [1.2933375 , 2.9760862 , 0.90589785],
-       [1.03085926, 3.13002382, 1.22490527],
-       [1.09304603, 3.56679427, 1.26783271],
-       [0.52050254, 3.5903606 , 1.42114042],
-       [1.34712856, 2.93839428, 0.58974672],
-       [2.44164622, 2.58203512, 0.76432091],
-       [2.69027665, 2.99796537, 0.89738242],
-       [2.76965187, 2.92597852, 0.98549851],
-       [2.02829879, 2.68907313, 0.3921368 ],
-       [1.4211892 , 3.42215998, 0.54223583],
-       [1.88799766, 2.62771445, 0.90567816],
-       [1.39853465, 2.75915071, 1.70872911],
-       [0.78009974, 3.30075052, 1.48190142],
-       [2.2083069 , 3.73017167, 1.06129323],
-       [1.87666989, 2.37943811, 0.81863359],
-       [2.41035271, 2.98789866, 0.599882  ],
-       [2.26782134, 2.89079656, 0.4914813 ],
-       [1.25085451, 2.86642713, 0.84409423],
-       [2.11791607, 2.86642575, 0.38941349],
-       [3.35089399, 2.96966239, 1.53271026],
-       [2.05312152, 2.77003779, 0.30831638],
-       [1.83091351, 2.38255534, 0.81726253],
-       [1.80454586, 2.55559903, 0.56428027],
-       [1.39825227, 2.8455521 , 0.72672271],
-       [3.06324547, 2.56987887, 1.28805849],
-       [1.89861511, 2.64007308, 0.38163798],
-       [1.0584579 , 4.24274589, 2.31271244],
-       [1.5185265 , 3.57067982, 0.76585766],
-       [0.52472   , 4.44150237, 2.14762671],
-       [0.77236486, 3.69480186, 1.17645413],
-       [0.53031563, 4.11613683, 1.73594932],
-       [1.2022172 , 5.03326801, 2.78128346],
-       [2.74462238, 3.3503222 , 1.22550604],
-       [0.92275933, 4.577021  , 2.2426558 ],
-       [1.40314162, 4.363498  , 1.50462864],
-       [1.48323372, 4.79334275, 3.22975724],
-       [0.4787491 , 3.62749566, 1.71837714],
-       [1.0325986 , 3.89360823, 1.10409694],
-       [0.27818948, 4.1132966 , 1.80475907],
-       [1.91870424, 3.82688169, 0.94858807],
-       [1.49910975, 3.91538879, 1.39433359],
-       [0.68622715, 3.89835633, 1.90677079],
-       [0.46463058, 3.70128288, 1.39713702],
-       [2.10127163, 5.18341242, 3.85224062],
-       [1.83092395, 5.58136629, 2.95786451],
-       [2.37017622, 4.02615768, 1.17790381],
-       [0.52540209, 4.31907679, 2.27442972],
-       [1.62249456, 3.4288432 , 0.91211061],
-       [1.47042293, 5.19031307, 2.77937737],
-       [1.15814207, 3.64273089, 0.84735471],
-       [0.520093  , 4.00723617, 2.15695444],
-       [0.66660166, 4.2637671 , 2.33581345],
-       [1.08324891, 3.45930032, 0.79774043],
-       [0.94925151, 3.27575645, 1.022307  ],
-       [0.84098317, 4.05342943, 1.3842265 ],
-       [0.75748198, 4.1585729 , 2.03854964],
-       [1.07124861, 4.71100584, 2.28297732],
-       [2.17345728, 5.12224641, 3.88774921],
-       [0.87682321, 4.13401784, 1.47357101],
-       [1.11534598, 3.39830644, 0.7964005 ],
-       [1.59782917, 3.63719075, 0.80521086],
-       [1.25982873, 5.08776655, 2.8607372 ],
-       [1.07214028, 4.00416552, 2.3101089 ],
-       [0.51434392, 3.58815834, 1.46990247],
-       [1.0762733 , 3.19454679, 0.97017134],
-       [0.23050145, 4.09907253, 1.97333575],
-       [0.57373487, 4.28416057, 2.07939567],
-       [0.51130902, 4.17402084, 2.06609741],
-       [1.5185265 , 3.57067982, 0.76585766],
-       [0.54141867, 4.32128686, 2.24723796],
-       [0.85128501, 4.3480018 , 2.42521977],
-       [0.52475835, 4.1240495 , 1.82594618],
-       [1.52100812, 3.97564407, 1.03093862],
-       [0.44371189, 3.7539635 , 1.44892686],
-       [1.08437101, 3.7969924 , 2.17585453],
-       [1.13739231, 3.25638099, 1.00508668]])
-
-
-

We compute every step and compare -ONNX and scikit-learn outputs.

-
for step in steps:
-    print('----------------------------')
-    print(step['model'])
-    onnx_step = step['onnx_step']
-    sess = InferenceSession(onnx_step.SerializeToString())
-    onnx_outputs = sess.run(None, {'X': X.astype(numpy.float32)})
-    onnx_output = onnx_outputs[-1]
-    skl_outputs = step['model']._debug.outputs['transform']
-
-    # comparison
-    diff = numpy.abs(skl_outputs.ravel() - onnx_output.ravel()).max()
-    print("difference", diff)
-
-# That was the first way: dynamically overwrite
-# every method transform or predict in a scikit-learn
-# pipeline to capture the input and output of every step,
-# compare them to the output produced by truncated ONNX
-# graphs built from the first one.
-#
-
-
-

Out:

-
----------------------------
-StandardScaler()
-difference 4.799262827148709e-07
-----------------------------
-KMeans(n_clusters=3)
-difference 1.095537650763756e-06
-
-
-
-
-

Python runtime to look into every node#

-

The python runtime may be useful to easily look -into every node of the ONNX graph. -This option can be used to check when the computation -fails due to nan values or a dimension mismatch.

-
onx = to_onnx(pipe, X[:1].astype(numpy.float32))
-
-oinf = OnnxInference(onx)
-oinf.run({'X': X[:2].astype(numpy.float32)},
-         verbose=1, fLOG=print)
-
-
-

Out:

-
+ki='Ad_Addcst': (3,) (dtype=float32 min=0.9830552339553833 max=5.035177230834961)
-+ki='Ge_Gemmcst': (3, 4) (dtype=float32 min=-1.3049873113632202 max=1.1359702348709106)
-+ki='Mu_Mulcst': (1,) (dtype=float32 min=0.0 max=0.0)
--- OnnxInference: run 8 nodes with 1 inputs
-Onnx-Scaler(X) -> variable    (name='Scaler')
-+kr='variable': (2, 4) (dtype=float32 min=-1.340226411819458 max=1.0190045833587646)
-Onnx-ReduceSumSquare(variable) -> Re_reduced0    (name='Re_ReduceSumSquare')
-+kr='Re_reduced0': (2, 1) (dtype=float32 min=4.850505828857422 max=5.376197338104248)
-Onnx-Mul(Re_reduced0, Mu_Mulcst) -> Mu_C0    (name='Mu_Mul')
-+kr='Mu_C0': (2, 1) (dtype=float32 min=0.0 max=0.0)
-Onnx-Gemm(variable, Ge_Gemmcst, Mu_C0) -> Ge_Y0    (name='Ge_Gemm')
-+kr='Ge_Y0': (2, 3) (dtype=float32 min=-10.366023063659668 max=7.967348575592041)
-Onnx-Add(Re_reduced0, Ge_Y0) -> Ad_C01    (name='Ad_Add')
-+kr='Ad_C01': (2, 3) (dtype=float32 min=-4.98982572555542 max=12.817853927612305)
-Onnx-Add(Ad_Addcst, Ad_C01) -> Ad_C0    (name='Ad_Add1')
-+kr='Ad_C0': (2, 3) (dtype=float32 min=0.045351505279541016 max=16.143783569335938)
-Onnx-Sqrt(Ad_C0) -> scores    (name='Sq_Sqrt')
-+kr='scores': (2, 3) (dtype=float32 min=0.2129589319229126 max=4.017932891845703)
-Onnx-ArgMin(Ad_C0) -> label    (name='Ar_ArgMin')
-+kr='label': (2,) (dtype=int64 min=1 max=1)
-
-{'label': array([1, 1], dtype=int64), 'scores': array([[3.9894059 , 0.21295893, 3.1211984 ],
-       [4.017933  , 0.99604493, 2.675508  ]], dtype=float32)}
-
-
-

And to get a sense of the intermediate results.

-
oinf.run({'X': X[:2].astype(numpy.float32)},
-         verbose=3, fLOG=print)
-
-# This way is usually better if you need to investigate
-# issues within the code of the runtime for an operator.
-#
-
-
-

Out:

-
+ki='Ad_Addcst': (3,) (dtype=float32 min=0.9830552339553833 max=5.035177230834961
-[3.32593    5.035177   0.98305523]
-+ki='Ge_Gemmcst': (3, 4) (dtype=float32 min=-1.3049873113632202 max=1.1359702348709106
-[[ 1.1359702   0.08842168  0.9961545   1.0175261 ]
- [-1.0145789   0.85326266 -1.3049873  -1.2548935 ]
- [-0.05021989 -0.8833765   0.34773782  0.2815273 ]]
-+ki='Mu_Mulcst': (1,) (dtype=float32 min=0.0 max=0.0
-[0.]
--kv='X' shape=(2, 4) dtype=float32 min=0.20000000298023224 max=5.099999904632568
--- OnnxInference: run 8 nodes with 1 inputs
-Onnx-Scaler(X) -> variable    (name='Scaler')
-+kr='variable': (2, 4) (dtype=float32 min=-1.340226411819458 max=1.0190045833587646)
-[[-0.9006812   1.0190046  -1.3402264  -1.3154442 ]
- [-1.1430167  -0.13197924 -1.3402264  -1.3154442 ]]
-Onnx-ReduceSumSquare(variable) -> Re_reduced0    (name='Re_ReduceSumSquare')
-+kr='Re_reduced0': (2, 1) (dtype=float32 min=4.850505828857422 max=5.376197338104248)
-[[5.3761973]
- [4.850506 ]]
-Onnx-Mul(Re_reduced0, Mu_Mulcst) -> Mu_C0    (name='Mu_Mul')
-+kr='Mu_C0': (2, 1) (dtype=float32 min=0.0 max=0.0)
-[[0.]
- [0.]]
-Onnx-Gemm(variable, Ge_Gemmcst, Mu_C0) -> Ge_Y0    (name='Ge_Gemm')
-+kr='Ge_Y0': (2, 3) (dtype=float32 min=-10.366023063659668 max=7.967348575592041)
-[[  7.2132325 -10.366023    3.3826268]
- [  7.9673486  -8.893578    1.3247827]]
-Onnx-Add(Re_reduced0, Ge_Y0) -> Ad_C01    (name='Ad_Add')
-+kr='Ad_C01': (2, 3) (dtype=float32 min=-4.98982572555542 max=12.817853927612305)
-[[12.58943   -4.9898257  8.758824 ]
- [12.817854  -4.0430717  6.1752887]]
-Onnx-Add(Ad_Addcst, Ad_C01) -> Ad_C0    (name='Ad_Add1')
-+kr='Ad_C0': (2, 3) (dtype=float32 min=0.045351505279541016 max=16.143783569335938)
-[[15.9153595   0.04535151  9.741879  ]
- [16.143784    0.9921055   7.158344  ]]
-Onnx-Sqrt(Ad_C0) -> scores    (name='Sq_Sqrt')
-+kr='scores': (2, 3) (dtype=float32 min=0.2129589319229126 max=4.017932891845703)
-[[3.9894059  0.21295893 3.1211984 ]
- [4.017933   0.99604493 2.675508  ]]
-Onnx-ArgMin(Ad_C0) -> label    (name='Ar_ArgMin')
-+kr='label': (2,) (dtype=int64 min=1 max=1)
-[1 1]
-[VALIDATE] type <class 'onnx.onnx_ml_pb2.ModelProto'>
-[VALIDATE] mis={}
-
-{'label': array([1, 1], dtype=int64), 'scores': array([[3.9894059 , 0.21295893, 3.1211984 ],
-       [4.017933  , 0.99604493, 2.675508  ]], dtype=float32)}
-
-
-
-
-

Final graph#

-
ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot fbegin investigate

Total running time of the script: ( 0 minutes 0.573 seconds)

- -

Gallery generated by Sphinx-Gallery


Intermediate results and investigation#

+

There are many reasons why a user may want more than just running the converted ONNX model. Intermediate results may be needed, that is the output of every node in the graph. The ONNX graph may need to be altered to remove some nodes. Transfer learning usually removes the last layers of a deep neural network. Another reason is debugging. It often happens that the runtime fails to compute the predictions due to a shape mismatch. Then it is useful to get the shape of every intermediate result. This example looks into two ways of doing it.
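As a reminder of what the output of every node means here, a small, self-contained sketch (not part of the original example) that lists the nodes of an ONNX graph and their output names; the file name is hypothetical:

import onnx

model_proto = onnx.load('pipeline.onnx')  # hypothetical file
for node in model_proto.graph.node:
    print(node.op_type, list(node.output))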

+
+

Look into pipeline steps#

+

The first way is a tricky one: it overloads +methods transform, predict and predict_proba +to keep a copy of inputs and outputs. It then goes +through every step of the pipeline. If the pipeline +has n steps, it converts the pipeline with step 1, +then the pipeline with steps 1, 2, then 1, 2, 3…

+
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+from mlprodict.onnxrt import OnnxInference
+import numpy
+from onnxruntime import InferenceSession
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import StandardScaler
+from sklearn.cluster import KMeans
+from sklearn.datasets import load_iris
+from skl2onnx import to_onnx
+from skl2onnx.helpers import collect_intermediate_steps
+from skl2onnx.common.data_types import FloatTensorType
+
+
+

The pipeline.

+
data = load_iris()
+X = data.data
+
+pipe = Pipeline(steps=[
+    ('std', StandardScaler()),
+    ('km', KMeans(3, n_init=3))
+])
+pipe.fit(X)
+
+
+
+
Pipeline(steps=[('std', StandardScaler()),
+                ('km', KMeans(n_clusters=3, n_init=3))])
+
+
+

The function goes through every step, +overloads the methods transform and +returns an ONNX graph for every step.

+
steps = collect_intermediate_steps(
+    pipe, "pipeline",
+    [("X", FloatTensorType([None, X.shape[1]]))],
+    target_opset=17)
+
+
+

We call method transform to populate the cache that the overloaded method transform keeps.

+
pipe.transform(X)
+
+
+
array([[3.97220157, 0.21295824, 3.12241924],
+       [3.99787307, 0.99604549, 2.67093263],
+       [4.17484361, 0.65198444, 2.97003927],
+       [4.1789179 , 0.9034561 , 2.87439521],
+       [4.09527298, 0.40215457, 3.30139711],
+       [3.88570212, 1.21154793, 3.51109541],
+       [4.19951064, 0.50244932, 3.14534167],
+       [3.95532767, 0.09132468, 2.99164562],
+       [4.38817034, 1.42174651, 2.91632391],
+       [4.03786574, 0.78993078, 2.79062164],
+       [3.90506362, 0.78999385, 3.3258656 ],
+       [4.06107053, 0.27618123, 3.04773291],
+       [4.14433414, 1.03497888, 2.80131879],
+       [4.61165831, 1.33482453, 3.20416659],
+       [4.13289816, 1.63865558, 3.89627182],
+       [4.48651213, 2.39898792, 4.50815311],
+       [4.01625262, 1.20748818, 3.61501306],
+       [3.89697041, 0.21618828, 3.05706539],
+       [3.71098466, 1.20986655, 3.35219798],
+       [4.086616  , 0.86706182, 3.50365186],
+       [3.64546316, 0.50401564, 2.81135074],
+       [3.93005961, 0.66826437, 3.28039115],
+       [4.49472411, 0.68658071, 3.58831498],
+       [3.56212928, 0.47945627, 2.55875855],
+       [3.97077425, 0.36345425, 2.96339957],
+       [3.86402575, 0.99023912, 2.5530338 ],
+       [3.77367018, 0.22683089, 2.82754129],
+       [3.87807792, 0.2947186 , 3.06173653],
+       [3.86275216, 0.25361098, 2.95552455],
+       [4.07994174, 0.65019824, 2.87333352],
+       [3.99861021, 0.80138328, 2.72803208],
+       [3.55568868, 0.52309257, 2.73641792],
+       [4.49114086, 1.57658655, 4.12348735],
+       [4.43501224, 1.87652483, 4.23530312],
+       [3.95950739, 0.76858489, 2.71091534],
+       [4.00072575, 0.54896332, 2.86311402],
+       [3.78300091, 0.63079314, 3.0615144 ],
+       [4.23490423, 0.45982568, 3.40343436],
+       [4.40153286, 1.2336976 , 2.99955514],
+       [3.89069251, 0.14580827, 2.95527858],
+       [3.9951345 , 0.20261743, 3.12357878],
+       [4.62328438, 2.67055552, 2.88854512],
+       [4.40353892, 0.90927099, 3.14807862],
+       [3.68912032, 0.50081008, 2.86147193],
+       [3.8939904 , 0.92159916, 3.34925913],
+       [3.99460105, 1.01946042, 2.64670109],
+       [4.12768297, 0.86953764, 3.53518427],
+       [4.21738173, 0.72275914, 2.99333175],
+       [3.95835329, 0.72324305, 3.34508059],
+       [3.95372786, 0.30295342, 2.90117552],
+       [0.93688791, 3.43619989, 1.92126695],
+       [0.97602613, 2.97232682, 1.43652719],
+       [0.70684331, 3.51850037, 1.70536445],
+       [2.68055347, 3.33264308, 0.9535028 ],
+       [1.0882743 , 3.35747592, 0.93174089],
+       [1.79517817, 2.77550662, 0.36129564],
+       [0.99454069, 3.01808184, 1.60954394],
+       [3.2941058 , 2.77360088, 1.4844702 ],
+       [1.11736796, 3.21148368, 1.13778849],
+       [2.41310146, 2.66294828, 0.76189431],
+       [3.71811095, 3.62389817, 1.95510958],
+       [1.44115032, 2.70011145, 0.78733906],
+       [2.72209616, 3.53658932, 1.25528011],
+       [1.26899484, 2.98813829, 0.68046933],
+       [2.03366801, 2.32311723, 0.7386478 ],
+       [0.96655433, 3.14311522, 1.48632293],
+       [1.66143984, 2.68234835, 0.80567208],
+       [2.1051166 , 2.63954211, 0.57184189],
+       [2.32028627, 3.97369206, 1.19832221],
+       [2.44625761, 2.87494798, 0.66720431],
+       [1.17881254, 3.03853641, 1.35243001],
+       [1.61047248, 2.8022861 , 0.54870182],
+       [1.63287534, 3.68305664, 0.80222733],
+       [1.52405623, 2.96833851, 0.59281379],
+       [1.27009395, 2.9760862 , 0.92691417],
+       [1.00782039, 3.13002382, 1.24582165],
+       [1.07063185, 3.56679427, 1.28841255],
+       [0.49696775, 3.5903606 , 1.44058261],
+       [1.32774484, 2.93839428, 0.60609916],
+       [2.42011876, 2.58203512, 0.75979573],
+       [2.67031866, 2.99796537, 0.88332283],
+       [2.74916442, 2.92597852, 0.97295414],
+       [2.00733004, 2.68907313, 0.39450078],
+       [1.40409915, 3.42215998, 0.55254609],
+       [1.87359817, 2.62771445, 0.90409172],
+       [1.38942931, 2.75915071, 1.72093827],
+       [0.75864765, 3.30075052, 1.50231003],
+       [2.18888648, 3.73017167, 1.06440319],
+       [1.85885082, 2.37943811, 0.82259848],
+       [2.39150476, 2.98789866, 0.58180763],
+       [2.24875062, 2.89079656, 0.47613928],
+       [1.23068597, 2.86642713, 0.86151719],
+       [2.0971052 , 2.86642575, 0.38623038],
+       [3.33176642, 2.96966239, 1.51536817],
+       [2.03396275, 2.77003779, 0.29666246],
+       [1.81194891, 2.38255534, 0.82460449],
+       [1.78545021, 2.55559903, 0.57017344],
+       [1.37597729, 2.8455521 , 0.74674716],
+       [3.04410524, 2.56987887, 1.27214703],
+       [1.8789993 , 2.64007308, 0.3839694 ],
+       [1.07518249, 4.24274589, 2.3231268 ],
+       [1.50664789, 3.57067982, 0.76586458],
+       [0.53503117, 4.44150237, 2.16460668],
+       [0.75898803, 3.69480186, 1.19079906],
+       [0.53772877, 4.11613683, 1.74890266],
+       [1.21258624, 5.03326801, 2.79880246],
+       [2.73154786, 3.3503222 , 1.20493915],
+       [0.92230348, 4.577021  , 2.26064879],
+       [1.39208869, 4.363498  , 1.51591765],
+       [1.50326811, 4.79334275, 3.24565892],
+       [0.4769421 , 3.62749566, 1.73398281],
+       [1.01951679, 3.89360823, 1.11653632],
+       [0.28300998, 4.1132966 , 1.8209098 ],
+       [1.90771307, 3.82688169, 0.94077286],
+       [1.49665868, 3.91538879, 1.39463942],
+       [0.69648261, 3.89835633, 1.91923245],
+       [0.44800197, 3.70128288, 1.41396325],
+       [2.11710726, 5.18341242, 3.87020286],
+       [1.8369882 , 5.58136629, 2.97188789],
+       [2.35409464, 4.02615768, 1.17309015],
+       [0.54832687, 4.31907679, 2.29006262],
+       [1.61278191, 3.4288432 , 0.90897245],
+       [1.47513288, 5.19031307, 2.79625003],
+       [1.14174615, 3.64273089, 0.86066075],
+       [0.53620233, 4.00723617, 2.1727675 ],
+       [0.67208548, 4.2637671 , 2.35489518],
+       [1.06663455, 3.45930032, 0.81175026],
+       [0.93494533, 3.27575645, 1.03569718],
+       [0.83553248, 4.05342943, 1.39558948],
+       [0.74854302, 4.1585729 , 2.05837295],
+       [1.07047627, 4.71100584, 2.30034327],
+       [2.18621697, 5.12224641, 3.90678521],
+       [0.87437682, 4.13401784, 1.48399598],
+       [1.0947599 , 3.39830644, 0.81512611],
+       [1.58049956, 3.63719075, 0.81393069],
+       [1.27157117, 5.08776655, 2.87804494],
+       [1.08615557, 4.00416552, 2.32144511],
+       [0.50258278, 3.58815834, 1.4859412 ],
+       [1.0625801 , 3.19454679, 0.98183333],
+       [0.24502791, 4.09907253, 1.9903383 ],
+       [0.59235157, 4.28416057, 2.0929821 ],
+       [0.52380862, 4.17402084, 2.08177428],
+       [1.50664789, 3.57067982, 0.76586458],
+       [0.56488438, 4.32128686, 2.26218919],
+       [0.87242105, 4.3480018 , 2.43867573],
+       [0.53197842, 4.1240495 , 1.84004429],
+       [1.50771114, 3.97564407, 1.03723854],
+       [0.43328422, 3.7539635 , 1.46435048],
+       [1.09439379, 3.7969924 , 2.186898  ],
+       [1.12558627, 3.25638099, 1.0145515 ]])
+
+
+

We compute every step and compare +ONNX and scikit-learn outputs.

+
for step in steps:
+    print('----------------------------')
+    print(step['model'])
+    onnx_step = step['onnx_step']
+    sess = InferenceSession(onnx_step.SerializeToString(),
+                            providers=["CPUExecutionProvider"])
+    onnx_outputs = sess.run(None, {'X': X.astype(numpy.float32)})
+    onnx_output = onnx_outputs[-1]
+    skl_outputs = step['model']._debug.outputs['transform']
+
+    # comparison
+    diff = numpy.abs(skl_outputs.ravel() - onnx_output.ravel()).max()
+    print("difference", diff)
+
+# That was the first way: dynamically overwrite
+# every method transform or predict in a scikit-learn
+# pipeline to capture the input and output of every step,
+# and compare them to the outputs produced by the ONNX graphs
+# converted from every truncated pipeline.
+#
+
+
+
----------------------------
+StandardScaler()
+difference 4.799262827148709e-07
+----------------------------
+KMeans(n_clusters=3, n_init=3)
+difference 1.095537650763756e-06
+
+
+
+
+

Python runtime to look into every node#

+

The Python runtime may be useful to easily look +into every node of the ONNX graph. +This option can be used to check when the computation +fails due to nan values or a dimension mismatch.

+
onx = to_onnx(pipe, X[:1].astype(numpy.float32),
+              target_opset=17)
+
+oinf = OnnxInference(onx)
+oinf.run({'X': X[:2].astype(numpy.float32)},
+         verbose=1, fLOG=print)
+
+
+
+ki='Ad_Addcst': (3,) (dtype=float32 min=1.0029560327529907 max=5.035177230834961)
++ki='Ge_Gemmcst': (3, 4) (dtype=float32 min=-1.3049873113632202 max=1.131404995918274)
++ki='Mu_Mulcst': (1,) (dtype=float32 min=0.0 max=0.0)
+-- OnnxInference: run 8 nodes with 1 inputs
+Onnx-Scaler(X) -> variable    (name='Scaler')
++kr='variable': (2, 4) (dtype=float32 min=-1.340226411819458 max=1.0190045833587646)
+Onnx-ReduceSumSquare(variable) -> Re_reduced0    (name='Re_ReduceSumSquare')
++kr='Re_reduced0': (2, 1) (dtype=float32 min=4.850505828857422 max=5.376197338104248)
+Onnx-Mul(Re_reduced0, Mu_Mulcst) -> Mu_C0    (name='Mu_Mul')
++kr='Mu_C0': (2, 1) (dtype=float32 min=0.0 max=0.0)
+Onnx-Gemm(variable, Ge_Gemmcst, Mu_C0) -> Ge_Y0    (name='Ge_Gemm')
++kr='Ge_Y0': (2, 3) (dtype=float32 min=-10.366023063659668 max=7.877023220062256)
+Onnx-Add(Re_reduced0, Ge_Y0) -> Ad_C01    (name='Ad_Add')
++kr='Ad_C01': (2, 3) (dtype=float32 min=-4.98982572555542 max=12.727529525756836)
+Onnx-Add(Ad_Addcst, Ad_C01) -> Ad_C0    (name='Ad_Add1')
++kr='Ad_C0': (2, 3) (dtype=float32 min=0.045351505279541016 max=15.982987403869629)
+Onnx-ArgMin(Ad_C0) -> label    (name='Ar_ArgMin')
++kr='label': (2,) (dtype=int64 min=1 max=1)
+Onnx-Sqrt(Ad_C0) -> scores    (name='Sq_Sqrt')
++kr='scores': (2, 3) (dtype=float32 min=0.2129589319229126 max=3.997872829437256)
+
+{'label': array([1, 1]), 'scores': array([[3.9722016 , 0.21295893, 3.1224194 ],
+       [3.9978728 , 0.99604493, 2.6709325 ]], dtype=float32)}
+
+
+

And to get a sense of the intermediate results.

+
oinf.run({'X': X[:2].astype(numpy.float32)},
+         verbose=3, fLOG=print)
+
+# This way is usually better if you need to investigate
+# issues within the code of the runtime for an operator.
+#
+
+
+
+ki='Ad_Addcst': (3,) (dtype=float32 min=1.0029560327529907 max=5.035177230834961
+[3.255458 5.035177 1.002956]
++ki='Ge_Gemmcst': (3, 4) (dtype=float32 min=-1.3049873113632202 max=1.131404995918274
+[[ 1.131405    0.07903422  0.98537153  0.9990883 ]
+ [-1.0145789   0.85326266 -1.3049873  -1.2548935 ]
+ [-0.06881714 -0.89339954  0.3452218   0.284393  ]]
++ki='Mu_Mulcst': (1,) (dtype=float32 min=0.0 max=0.0
+[0.]
+-kv='X' shape=(2, 4) dtype=float32 min=0.20000000298023224 max=5.099999904632568
+-- OnnxInference: run 8 nodes with 1 inputs
+Onnx-Scaler(X) -> variable    (name='Scaler')
++kr='variable': (2, 4) (dtype=float32 min=-1.340226411819458 max=1.0190045833587646)
+[[-0.9006812   1.0190046  -1.3402264  -1.3154442 ]
+ [-1.1430167  -0.13197924 -1.3402264  -1.3154442 ]]
+Onnx-ReduceSumSquare(variable) -> Re_reduced0    (name='Re_ReduceSumSquare')
++kr='Re_reduced0': (2, 1) (dtype=float32 min=4.850505828857422 max=5.376197338104248)
+[[5.3761973]
+ [4.850506 ]]
+Onnx-Mul(Re_reduced0, Mu_Mulcst) -> Mu_C0    (name='Mu_Mul')
++kr='Mu_C0': (2, 1) (dtype=float32 min=0.0 max=0.0)
+[[0.]
+ [0.]]
+Onnx-Gemm(variable, Ge_Gemmcst, Mu_C0) -> Ge_Y0    (name='Ge_Gemm')
++kr='Ge_Y0': (2, 3) (dtype=float32 min=-10.366023063659668 max=7.877023220062256)
+[[  7.14673   -10.366023    3.370349 ]
+ [  7.877023   -8.893578    1.2804183]]
+Onnx-Add(Re_reduced0, Ge_Y0) -> Ad_C01    (name='Ad_Add')
++kr='Ad_C01': (2, 3) (dtype=float32 min=-4.98982572555542 max=12.727529525756836)
+[[12.522927  -4.9898257  8.746546 ]
+ [12.72753   -4.0430717  6.130924 ]]
+Onnx-Add(Ad_Addcst, Ad_C01) -> Ad_C0    (name='Ad_Add1')
++kr='Ad_C0': (2, 3) (dtype=float32 min=0.045351505279541016 max=15.982987403869629)
+[[15.778385    0.04535151  9.749502  ]
+ [15.982987    0.9921055   7.13388   ]]
+Onnx-ArgMin(Ad_C0) -> label    (name='Ar_ArgMin')
++kr='label': (2,) (dtype=int64 min=1 max=1)
+[1 1]
+Onnx-Sqrt(Ad_C0) -> scores    (name='Sq_Sqrt')
++kr='scores': (2, 3) (dtype=float32 min=0.2129589319229126 max=3.997872829437256)
+[[3.9722016  0.21295893 3.1224194 ]
+ [3.9978728  0.99604493 2.6709325 ]]
+[VALIDATE] type <class 'onnx.onnx_ml_pb2.ModelProto'>
+[VALIDATE] mis={}
+
+{'label': array([1, 1]), 'scores': array([[3.9722016 , 0.21295893, 3.1224194 ],
+       [3.9978728 , 0.99604493, 2.6709325 ]], dtype=float32)}
+
+
+
+
+
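When mlprodict is not available, a similar inspection can be sketched with onnxruntime alone: promote an intermediate result to an output with select_model_inputs_outputs (the same helper used in the next examples) and run the truncated graph. The name 'variable' below is the output of the Scaler node shown in the trace above.

from onnxruntime import InferenceSession
from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs

# keep only the output of the StandardScaler node
onx_scaler_only = select_model_inputs_outputs(onx, ['variable'])
sess = InferenceSession(onx_scaler_only.SerializeToString(),
                        providers=["CPUExecutionProvider"])
print(sess.run(None, {'X': X[:2].astype(numpy.float32)})[0])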

Final graph#

+
ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot fbegin investigate

Total running time of the script: ( 0 minutes 0.505 seconds)

+ +

Gallery generated by Sphinx-Gallery

\ No newline at end of file
diff --git a/auto_tutorial/plot_gbegin_cst.html b/auto_tutorial/plot_gbegin_cst.html index 8198e1563..0a39c530b 100644 --- a/auto_tutorial/plot_gbegin_cst.html +++ b/auto_tutorial/plot_gbegin_cst.html @@ -1,650 +1,500 @@
-Store arrays in one onnx graph — sklearn-onnx 1.11.2 documentation

Store arrays in one onnx graph#

-

Once a model is converted it can be useful to store an -array as a constant in the graph and retrieve it through -an output. This allows the user to store training parameters -or other information like a vocabulary. -The last sections show how to remove an output or to promote -an intermediate result to an output.

- -
-

Train and convert a model#

-

A model is trained and converted with scikit-learn and skl2onnx below, but the ONNX graph -could be produced by any other converter library as well.

-
import pprint
-import numpy
-from onnx import load
-from onnxruntime import InferenceSession
-from sklearn.datasets import load_iris
-from sklearn.linear_model import LogisticRegression
-from sklearn.model_selection import train_test_split
-from skl2onnx import to_onnx
-from skl2onnx.helpers.onnx_helper import (
-    add_output_initializer, select_model_inputs_outputs)
-
-
-data = load_iris()
-X, y = data.data.astype(numpy.float32), data.target
-X_train, X_test, y_train, y_test = train_test_split(X, y)
-model = LogisticRegression(penalty='elasticnet', C=2.,
-                           solver='saga', l1_ratio=0.5)
-model.fit(X_train, y_train)
-
-onx = to_onnx(model, X_train[:1], target_opset=12,
-              options={'zipmap': False})
-
-
-

Out:

-
D:\Program Files\Python\Python39\lib\site-packages\sklearn\linear_model\_sag.py:350: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
-  warnings.warn(
-
-
-
-
-

Add training parameter#

-
new_onx = add_output_initializer(
-    onx,
-    ['C', 'l1_ratio'],
-    [numpy.array([model.C]), numpy.array([model.l1_ratio])])
-
-
-
-
-

Inference#

-
sess = InferenceSession(new_onx.SerializeToString())
-print("output names:", [o.name for o in sess.get_outputs()])
-res = sess.run(None, {'X': X_test[:2]})
-print("outputs")
-pprint.pprint(res)
-
-
-

Out:

-
output names: ['label', 'probabilities', 'C', 'l1_ratio']
-outputs
-[array([2, 2], dtype=int64),
- array([[4.6285493e-03, 4.9396357e-01, 5.0140786e-01],
-       [2.8713079e-05, 2.1343414e-02, 9.7862786e-01]], dtype=float32),
- array([2.]),
- array([0.5])]
-
-
-

The major drawback of this solution is that it increases the prediction -time as onnxruntime copies the constants for every prediction. -It is possible either to store those constants in a separate ONNX graph -or to remove them.

-
-
-

Select outputs#

-

The next function removes unneeded outputs from a model, -not only the constants. The next model only keeps the probabilities.

-
simple_onx = select_model_inputs_outputs(new_onx, ['probabilities'])
-
-sess = InferenceSession(simple_onx.SerializeToString())
-print("output names:", [o.name for o in sess.get_outputs()])
-res = sess.run(None, {'X': X_test[:2]})
-print("outputs")
-pprint.pprint(res)
-
-# Function *select_model_inputs_outputs* can also promote an intermediate
-# result to an output.
-#
-
-
-

Out:

-
output names: ['probabilities']
-outputs
-[array([[4.6285493e-03, 4.9396357e-01, 5.0140786e-01],
-       [2.8713079e-05, 2.1343414e-02, 9.7862786e-01]], dtype=float32)]
-
-
-

This example only uses the ONNX graph in memory and never saves or loads a -model. This can be done by using the following snippets of code.

-
-
-

Save a model#

-
with open("simplified_model.onnx", "wb") as f:
-    f.write(simple_onx.SerializeToString())
-
-
-
-
-

Load a model#

-
model = load("simplified_model.onnx", "wb")
-
-sess = InferenceSession(model.SerializeToString())
-print("output names:", [o.name for o in sess.get_outputs()])
-res = sess.run(None, {'X': X_test[:2]})
-print("outputs")
-pprint.pprint(res)
-
-
-

Out:

-
output names: ['probabilities']
-outputs
-[array([[4.6285493e-03, 4.9396357e-01, 5.0140786e-01],
-       [2.8713079e-05, 2.1343414e-02, 9.7862786e-01]], dtype=float32)]
-
-
-

Total running time of the script: ( 0 minutes 0.081 seconds)

- -

Gallery generated by Sphinx-Gallery

+Store arrays in one onnx graph - sklearn-onnx 1.14.0 documentation

Store arrays in one onnx graph#

+

Once a model is converted it can be useful to store an +array as a constant in the graph and retrieve it through +an output. This allows the user to store training parameters +or other information like a vocabulary. +The last sections show how to remove an output or to promote +an intermediate result to an output.

+
+

Train and convert a model#

+

A model is trained and converted with scikit-learn and skl2onnx below, but the ONNX graph +could be produced by any other converter library as well.

+
import pprint
+import numpy
+from onnx import load
+from onnxruntime import InferenceSession
+from sklearn.datasets import load_iris
+from sklearn.linear_model import LogisticRegression
+from sklearn.model_selection import train_test_split
+from skl2onnx import to_onnx
+from skl2onnx.helpers.onnx_helper import (
+    add_output_initializer, select_model_inputs_outputs)
+
+
+data = load_iris()
+X, y = data.data.astype(numpy.float32), data.target
+X_train, X_test, y_train, y_test = train_test_split(X, y)
+model = LogisticRegression(penalty='elasticnet', C=2.,
+                           solver='saga', l1_ratio=0.5)
+model.fit(X_train, y_train)
+
+onx = to_onnx(model, X_train[:1], target_opset=12,
+              options={'zipmap': False})
+
+
+
/home/xadupre/github/scikit-learn/sklearn/linear_model/_sag.py:350: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
+  warnings.warn(
+
+
+
+
+

Add training parameter#

+
new_onx = add_output_initializer(
+    onx,
+    ['C', 'l1_ratio'],
+    [numpy.array([model.C]), numpy.array([model.l1_ratio])])
+
+
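The constants added above end up as initializers in the graph, so they can be listed directly with onnx.numpy_helper. This is only a sketch: the exact names given to the added initializers depend on how add_output_initializer registers them, so everything is printed.

from onnx import numpy_helper

for init in new_onx.graph.initializer:
    print(init.name, numpy_helper.to_array(init))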
+
+
+

Inference#

+
sess = InferenceSession(new_onx.SerializeToString())
+print("output names:", [o.name for o in sess.get_outputs()])
+res = sess.run(None, {'X': X_test[:2]})
+print("outputs")
+pprint.pprint(res)
+
+
+
output names: ['label', 'probabilities', 'C', 'l1_ratio']
+outputs
+[array([2, 0], dtype=int64),
+ array([[7.9804100e-05, 9.3561210e-02, 9.0635902e-01],
+       [9.8617423e-01, 1.3825771e-02, 8.3083460e-09]], dtype=float32),
+ array([2.]),
+ array([0.5])]
+
+
+

The major drawback of this solution is that it increases the prediction +time as onnxruntime copies the constants for every prediction. +It is possible either to store those constants in a separate ONNX graph +or to remove them.

+
+
+

Select outputs#

+

The next function removes unneeded outputs from a model, +not only the constants. The next model only keeps the probabilities.

+
simple_onx = select_model_inputs_outputs(new_onx, ['probabilities'])
+
+sess = InferenceSession(simple_onx.SerializeToString())
+print("output names:", [o.name for o in sess.get_outputs()])
+res = sess.run(None, {'X': X_test[:2]})
+print("outputs")
+pprint.pprint(res)
+
+# Function *select_model_inputs_outputs* can also promote an intermediate
+# result to an output.
+#
+
+
+
output names: ['probabilities']
+outputs
+[array([[7.9804100e-05, 9.3561210e-02, 9.0635902e-01],
+       [9.8617423e-01, 1.3825771e-02, 8.3083460e-09]], dtype=float32)]
+
+
+

This example only uses the ONNX graph in memory and never saves or loads a +model. This can be done by using the following snippets of code.

+
+
+

Save a model#

+
with open("simplified_model.onnx", "wb") as f:
+    f.write(simple_onx.SerializeToString())
+
+
+
+
+

Load a model#

+
model = load("simplified_model.onnx", "wb")
+
+sess = InferenceSession(model.SerializeToString())
+print("output names:", [o.name for o in sess.get_outputs()])
+res = sess.run(None, {'X': X_test[:2]})
+print("outputs")
+pprint.pprint(res)
+
+
+
output names: ['probabilities']
+outputs
+[array([[7.9804100e-05, 9.3561210e-02, 9.0635902e-01],
+       [9.8617423e-01, 1.3825771e-02, 8.3083460e-09]], dtype=float32)]
+
+
+

Total running time of the script: ( 0 minutes 0.040 seconds)

+ +

Gallery generated by Sphinx-Gallery

\ No newline at end of file
diff --git a/auto_tutorial/plot_gbegin_dataframe.html b/auto_tutorial/plot_gbegin_dataframe.html index f8283849a..c33ef791a 100644 --- a/auto_tutorial/plot_gbegin_dataframe.html +++ b/auto_tutorial/plot_gbegin_dataframe.html @@ -1,790 +1,641 @@
-Dataframe as an input — sklearn-onnx 1.11.2 documentation

Dataframe as an input#

-

A pipeline usually ingests data as a matrix. It may be converted into a matrix -if all the data share the same type. But data held in a dataframe -usually has multiple types: float, integer or string for categories. -ONNX also supports that case.

- -
-

A dataset with categories#

-
from mlinsights.plotting import pipeline2dot
-import numpy
-import pprint
-from mlprodict.onnx_conv import guess_schema_from_data
-from onnxruntime import InferenceSession
-from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
-from mlprodict.onnxrt import OnnxInference
-from mlprodict.onnx_conv import to_onnx as to_onnx_ext
-from skl2onnx import to_onnx
-from pandas import DataFrame
-from sklearn.pipeline import Pipeline
-from sklearn.compose import ColumnTransformer
-from sklearn.preprocessing import OneHotEncoder
-from sklearn.ensemble import RandomForestClassifier
-
-
-data = DataFrame([
-    dict(CAT1='a', CAT2='c', num1=0.5, num2=0.6, y=0),
-    dict(CAT1='b', CAT2='d', num1=0.4, num2=0.8, y=1),
-    dict(CAT1='a', CAT2='d', num1=0.5, num2=0.56, y=0),
-    dict(CAT1='a', CAT2='d', num1=0.55, num2=0.56, y=1),
-    dict(CAT1='a', CAT2='c', num1=0.35, num2=0.86, y=0),
-    dict(CAT1='a', CAT2='c', num1=0.5, num2=0.68, y=1),
-])
-
-cat_cols = ['CAT1', 'CAT2']
-train_data = data.drop('y', axis=1)
-
-
-categorical_transformer = Pipeline([
-    ('onehot', OneHotEncoder(sparse=False, handle_unknown='ignore'))])
-preprocessor = ColumnTransformer(
-    transformers=[
-        ('cat', categorical_transformer, cat_cols)],
-    remainder='passthrough')
-pipe = Pipeline([('preprocess', preprocessor),
-                 ('rf', RandomForestClassifier())])
-pipe.fit(train_data, data['y'])
-
-
-
-
Pipeline(steps=[('preprocess',
-                 ColumnTransformer(remainder='passthrough',
-                                   transformers=[('cat',
-                                                  Pipeline(steps=[('onehot',
-                                                                   OneHotEncoder(handle_unknown='ignore',
-                                                                                 sparse=False))]),
-                                                  ['CAT1', 'CAT2'])])),
-                ('rf', RandomForestClassifier())])
-
-
-

Display.

-
dot = pipeline2dot(pipe, train_data)
-ax = plot_graphviz(dot)
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot gbegin dataframe
-
-

Conversion to ONNX#

-

Function to_onnx does not handle dataframes.

-
try:
-    onx = to_onnx(pipe, train_data[:1])
-except NotImplementedError as e:
-    print(e)
-
-
-

But it is possible to use an extended one.

-
onx = to_onnx_ext(
-    pipe, train_data[:1],
-    options={RandomForestClassifier: {'zipmap': False}})
-
-
-
-
-

Graph#

-
oinf = OnnxInference(onx)
-ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot gbegin dataframe
-
-

Prediction with ONNX#

-

onnxruntime does not support dataframes.

-
sess = InferenceSession(onx.SerializeToString())
-try:
-    sess.run(None, train_data)
-except Exception as e:
-    print(e)
-
-
-

Out:

-
run(): incompatible function arguments. The following argument types are supported:
-    1. (self: onnxruntime.capi.onnxruntime_pybind11_state.InferenceSession, arg0: List[str], arg1: Dict[str, object], arg2: onnxruntime.capi.onnxruntime_pybind11_state.RunOptions) -> List[object]
-
-Invoked with: <onnxruntime.capi.onnxruntime_pybind11_state.InferenceSession object at 0x000001A9D108F4B0>, ['label', 'probabilities'],   CAT1 CAT2  num1  num2
-0    a    c  0.50  0.60
-1    b    d  0.40  0.80
-2    a    d  0.50  0.56
-3    a    d  0.55  0.56
-4    a    c  0.35  0.86
-5    a    c  0.50  0.68, None
-
-
-

Let’s use a shortcut

-
oinf = OnnxInference(onx)
-got = oinf.run(train_data)
-print(pipe.predict(train_data))
-print(got['label'])
-
-
-

Out:

-
[0 1 0 1 0 1]
-[0 1 0 1 0 1]
-
-
-

And probabilities.

-
print(pipe.predict_proba(train_data))
-print(got['probabilities'])
-
-
-

Out:

-
[[0.75 0.25]
- [0.26 0.74]
- [0.79 0.21]
- [0.31 0.69]
- [0.7  0.3 ]
- [0.31 0.69]]
-[[0.75       0.25      ]
- [0.26       0.74      ]
- [0.78999996 0.21000001]
- [0.31       0.69      ]
- [0.70000005 0.29999998]
- [0.30999994 0.69000006]]
-
-
-

It looks ok. Let’s dig into the details to -directly use onnxruntime.

-
-
-

Unhide conversion logic with a dataframe#

-

A dataframe can be seen as a set of columns with -different types. That’s what ONNX should see: -a list of inputs, the input name is the column name, -the input type is the column type.

-
init = guess_schema_from_data(train_data)
-
-pprint.pprint(init)
-
-
-

Out:

-
[('CAT1', StringTensorType(shape=[None, 1])),
- ('CAT2', StringTensorType(shape=[None, 1])),
- ('num1', DoubleTensorType(shape=[None, 1])),
- ('num2', DoubleTensorType(shape=[None, 1]))]
-
-
-

Let’s use float instead.

-
for c in train_data.columns:
-    if c not in cat_cols:
-        train_data[c] = train_data[c].astype(numpy.float32)
-
-init = guess_schema_from_data(train_data)
-pprint.pprint(init)
-
-
-

Out:

-
[('CAT1', StringTensorType(shape=[None, 1])),
- ('CAT2', StringTensorType(shape=[None, 1])),
- ('num1', FloatTensorType(shape=[None, 1])),
- ('num2', FloatTensorType(shape=[None, 1]))]
-
-
-

Let’s convert with skl2onnx only.

-
onx2 = to_onnx(
-    pipe, initial_types=init,
-    options={RandomForestClassifier: {'zipmap': False}})
-
-
-

Let’s run it with onnxruntime. -We need to convert the dataframe into a dictionary -where column names become keys, and column values become -values.

-
inputs = {c: train_data[c].values.reshape((-1, 1))
-          for c in train_data.columns}
-pprint.pprint(inputs)
-
-
-

Out:

-
{'CAT1': array([['a'],
-       ['b'],
-       ['a'],
-       ['a'],
-       ['a'],
-       ['a']], dtype=object),
- 'CAT2': array([['c'],
-       ['d'],
-       ['d'],
-       ['d'],
-       ['c'],
-       ['c']], dtype=object),
- 'num1': array([[0.5 ],
-       [0.4 ],
-       [0.5 ],
-       [0.55],
-       [0.35],
-       [0.5 ]], dtype=float32),
- 'num2': array([[0.6 ],
-       [0.8 ],
-       [0.56],
-       [0.56],
-       [0.86],
-       [0.68]], dtype=float32)}
-
-
-

Inference.

-
sess2 = InferenceSession(onx2.SerializeToString())
-
-got2 = sess2.run(None, inputs)
-
-print(pipe.predict(train_data))
-print(got2[0])
-
-
-

Out:

-
[0 1 0 1 0 1]
-[0 1 0 1 0 1]
-
-
-

And probabilities.

-
print(pipe.predict_proba(train_data))
-print(got2[1])
-
-
-

Out:

-
[[0.75 0.25]
- [0.26 0.74]
- [0.79 0.21]
- [0.31 0.69]
- [0.7  0.3 ]
- [0.31 0.69]]
-[[0.75       0.25000003]
- [0.2600004  0.7399996 ]
- [0.78999996 0.21000002]
- [0.31000036 0.68999964]
- [0.70000005 0.29999998]
- [0.31000036 0.68999964]]
-
-
-

Total running time of the script: ( 0 minutes 1.416 seconds)

- -

Gallery generated by Sphinx-Gallery

+Dataframe as an input - sklearn-onnx 1.14.0 documentation

Dataframe as an input#

+

A pipeline usually ingests data as a matrix. It may be converted into a matrix +if all the data share the same type. But data held in a dataframe +usually has multiple types: float, integer or string for categories. +ONNX also supports that case.

+
+

A dataset with categories#

+
from mlinsights.plotting import pipeline2dot
+import numpy
+import pprint
+from mlprodict.onnx_conv import guess_schema_from_data
+from onnxruntime import InferenceSession
+from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+from mlprodict.onnxrt import OnnxInference
+from mlprodict.onnx_conv import to_onnx as to_onnx_ext
+from skl2onnx import to_onnx
+from pandas import DataFrame
+from sklearn.pipeline import Pipeline
+from sklearn.compose import ColumnTransformer
+from sklearn.preprocessing import OneHotEncoder
+from sklearn.ensemble import RandomForestClassifier
+
+
+data = DataFrame([
+    dict(CAT1='a', CAT2='c', num1=0.5, num2=0.6, y=0),
+    dict(CAT1='b', CAT2='d', num1=0.4, num2=0.8, y=1),
+    dict(CAT1='a', CAT2='d', num1=0.5, num2=0.56, y=0),
+    dict(CAT1='a', CAT2='d', num1=0.55, num2=0.56, y=1),
+    dict(CAT1='a', CAT2='c', num1=0.35, num2=0.86, y=0),
+    dict(CAT1='a', CAT2='c', num1=0.5, num2=0.68, y=1),
+])
+
+cat_cols = ['CAT1', 'CAT2']
+train_data = data.drop('y', axis=1)
+
+
+categorical_transformer = Pipeline([
+    ('onehot', OneHotEncoder(sparse=False, handle_unknown='ignore'))])
+preprocessor = ColumnTransformer(
+    transformers=[
+        ('cat', categorical_transformer, cat_cols)],
+    remainder='passthrough')
+pipe = Pipeline([('preprocess', preprocessor),
+                 ('rf', RandomForestClassifier())])
+pipe.fit(train_data, data['y'])
+
+
+
+
Pipeline(steps=[('preprocess',
+                 ColumnTransformer(remainder='passthrough',
+                                   transformers=[('cat',
+                                                  Pipeline(steps=[('onehot',
+                                                                   OneHotEncoder(handle_unknown='ignore',
+                                                                                 sparse=False))]),
+                                                  ['CAT1', 'CAT2'])])),
+                ('rf', RandomForestClassifier())])
+
+
+

Display.

+
dot = pipeline2dot(pipe, train_data)
+ax = plot_graphviz(dot)
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot gbegin dataframe
+
+

Conversion to ONNX#

+

Function to_onnx does not handle dataframes.

+
try:
+    onx = to_onnx(pipe, train_data[:1])
+except NotImplementedError as e:
+    print(e)
+
+
+

But it is possible to use an extended one.

+
onx = to_onnx_ext(
+    pipe, train_data[:1],
+    options={RandomForestClassifier: {'zipmap': False}})
+
+
+
+
+

Graph#

+
oinf = OnnxInference(onx)
+ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot gbegin dataframe
+
+

Prediction with ONNX#

+

onnxruntime does not support dataframes.

+
sess = InferenceSession(onx.SerializeToString())
+try:
+    sess.run(None, train_data)
+except Exception as e:
+    print(e)
+
+
+
run(): incompatible function arguments. The following argument types are supported:
+    1. (self: onnxruntime.capi.onnxruntime_pybind11_state.InferenceSession, arg0: List[str], arg1: Dict[str, object], arg2: onnxruntime.capi.onnxruntime_pybind11_state.RunOptions) -> List[object]
+
+Invoked with: <onnxruntime.capi.onnxruntime_pybind11_state.InferenceSession object at 0x7ff2739429b0>, ['label', 'probabilities'],   CAT1 CAT2  num1  num2
+0    a    c  0.50  0.60
+1    b    d  0.40  0.80
+2    a    d  0.50  0.56
+3    a    d  0.55  0.56
+4    a    c  0.35  0.86
+5    a    c  0.50  0.68, None
+
+
+

Let’s use a shortcut

+
oinf = OnnxInference(onx)
+got = oinf.run(train_data)
+print(pipe.predict(train_data))
+print(got['label'])
+
+
+
[0 1 0 1 0 1]
+[0 1 0 1 0 1]
+
+
+

And probabilities.

+
print(pipe.predict_proba(train_data))
+print(got['probabilities'])
+
+
+
[[0.79 0.21]
+ [0.34 0.66]
+ [0.73 0.27]
+ [0.28 0.72]
+ [0.71 0.29]
+ [0.34 0.66]]
+[[0.79       0.20999998]
+ [0.34000003 0.65999997]
+ [0.73       0.26999998]
+ [0.27999997 0.72      ]
+ [0.71000004 0.28999996]
+ [0.34000003 0.65999997]]
+
+
+

It looks ok. Let’s dig into the details to +directly use onnxruntime.

+
+
+

Unhide conversion logic with a dataframe#

+

A dataframe can be seen as a set of columns with +different types. That’s what ONNX should see: +a list of inputs, the input name is the column name, +the input type is the column type.

+
init = guess_schema_from_data(train_data)
+
+pprint.pprint(init)
+
+
+
[('CAT1', StringTensorType(shape=[None, 1])),
+ ('CAT2', StringTensorType(shape=[None, 1])),
+ ('num1', DoubleTensorType(shape=[None, 1])),
+ ('num2', DoubleTensorType(shape=[None, 1]))]
+
+
+

Let’s use float instead.

+
for c in train_data.columns:
+    if c not in cat_cols:
+        train_data[c] = train_data[c].astype(numpy.float32)
+
+init = guess_schema_from_data(train_data)
+pprint.pprint(init)
+
+
+
[('CAT1', StringTensorType(shape=[None, 1])),
+ ('CAT2', StringTensorType(shape=[None, 1])),
+ ('num1', FloatTensorType(shape=[None, 1])),
+ ('num2', FloatTensorType(shape=[None, 1]))]
+
+
+
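The same schema could also be written by hand with the skl2onnx types instead of calling guess_schema_from_data; a sketch assuming the column split defined above:

from skl2onnx.common.data_types import FloatTensorType, StringTensorType

init_manual = (
    [(c, StringTensorType([None, 1])) for c in cat_cols] +
    [(c, FloatTensorType([None, 1]))
     for c in train_data.columns if c not in cat_cols])
pprint.pprint(init_manual)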

Let’s convert with skl2onnx only.

+
onx2 = to_onnx(
+    pipe, initial_types=init,
+    options={RandomForestClassifier: {'zipmap': False}})
+
+
+

Let’s run it with onnxruntime. +We need to convert the dataframe into a dictionary +where column names become keys, and column values become +values.

+
inputs = {c: train_data[c].values.reshape((-1, 1))
+          for c in train_data.columns}
+pprint.pprint(inputs)
+
+
+
{'CAT1': array([['a'],
+       ['b'],
+       ['a'],
+       ['a'],
+       ['a'],
+       ['a']], dtype=object),
+ 'CAT2': array([['c'],
+       ['d'],
+       ['d'],
+       ['d'],
+       ['c'],
+       ['c']], dtype=object),
+ 'num1': array([[0.5 ],
+       [0.4 ],
+       [0.5 ],
+       [0.55],
+       [0.35],
+       [0.5 ]], dtype=float32),
+ 'num2': array([[0.6 ],
+       [0.8 ],
+       [0.56],
+       [0.56],
+       [0.86],
+       [0.68]], dtype=float32)}
+
+
+

Inference.

+
sess2 = InferenceSession(onx2.SerializeToString())
+
+got2 = sess2.run(None, inputs)
+
+print(pipe.predict(train_data))
+print(got2[0])
+
+
+
[0 1 0 1 0 1]
+[0 1 0 1 0 1]
+
+
+

And probabilities.

+
print(pipe.predict_proba(train_data))
+print(got2[1])
+
+
+
[[0.79 0.21]
+ [0.34 0.66]
+ [0.73 0.27]
+ [0.28 0.72]
+ [0.71 0.29]
+ [0.34 0.66]]
+[[0.78999996 0.21000002]
+ [0.34000033 0.65999967]
+ [0.73       0.27      ]
+ [0.2800004  0.7199996 ]
+ [0.71000004 0.29      ]
+ [0.34000033 0.65999967]]
+
+
+

Total running time of the script: ( 0 minutes 1.130 seconds)

+ +

Gallery generated by Sphinx-Gallery

\ No newline at end of file
diff --git a/auto_tutorial/plot_gbegin_transfer_learning.html b/auto_tutorial/plot_gbegin_transfer_learning.html index c23eed852..cbe03987c 100644 --- a/auto_tutorial/plot_gbegin_transfer_learning.html +++ b/auto_tutorial/plot_gbegin_transfer_learning.html @@ -1,892 +1,733 @@
-Transfer Learning with ONNX — sklearn-onnx 1.11.2 documentation

Transfer Learning with ONNX#

-

Transfer learning is common with deep learning. -A deep learning model is used as preprocessing before -the output is sent to a final classifier or regressor. -It is not quite easy in this case to mix frameworks: -scikit-learn with pytorch -(or skorch), the Keras API for Tensorflow, -tf.keras.wrappers.scikit_learn. Every combination -requires work. ONNX reduces the number of platforms to -support. Once the model is converted into ONNX, -it can be inserted in any scikit-learn pipeline.

- -
-

Retrieve and load a model#

-

We download one model from the :epkg:`ONNX Zoo` but the model -could be trained and produced by another converter library.

-
import sys
-from io import BytesIO
-import onnx
-from mlprodict.sklapi import OnnxTransformer
-from sklearn.decomposition import PCA
-from sklearn.pipeline import Pipeline
-from mlinsights.plotting.gallery import plot_gallery_images
-import matplotlib.pyplot as plt
-from skl2onnx.tutorial.imagenet_classes import class_names
-import numpy
-from PIL import Image
-from onnxruntime import InferenceSession
-from onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument
-import os
-import urllib.request
-
-
-def download_file(url, name, min_size):
-    if not os.path.exists(name):
-        print("download '%s'" % url)
-        with urllib.request.urlopen(url) as u:
-            content = u.read()
-        if len(content) < min_size:
-            raise RuntimeError(
-                "Unable to download '{}' due to\n{}".format(
-                    url, content))
-        print("downloaded %d bytes." % len(content))
-        with open(name, "wb") as f:
-            f.write(content)
-    else:
-        print("'%s' already downloaded" % name)
-
-
-model_name = "squeezenet1.1-7.onnx"
-url_name = ("https://github.com/onnx/models/raw/main/vision/"
-            "classification/squeezenet/model")
-url_name += "/" + model_name
-try:
-    download_file(url_name, model_name, 100000)
-except RuntimeError as e:
-    print(e)
-    sys.exit(1)
-
-
-

Out:

-
download 'https://github.com/onnx/models/raw/main/vision/classification/squeezenet/model/squeezenet1.1-7.onnx'
-downloaded 4956208 bytes.
-
-
-

Loading the ONNX file and using it on one image.

-
-
-

Out:

-
NodeArg(name='data', type='tensor(float)', shape=[1, 3, 224, 224])
-
-
-

The model expects a series of images of size -[3, 224, 224].

-
-
-

Classifying an image#

-
url = ("https://upload.wikimedia.org/wikipedia/commons/d/d2/"
-       "East_Coker_elm%2C_2.jpg")
-img = "East_Coker_elm.jpg"
-download_file(url, img, 100000)
-
-im0 = Image.open(img)
-im = im0.resize((224, 224))
-# im.show()
-
-
-

Out:

-
download 'https://upload.wikimedia.org/wikipedia/commons/d/d2/East_Coker_elm%2C_2.jpg'
-downloaded 712230 bytes.
-
-
-

Image to numpy and prediction.

-
def im2array(im):
-    X = numpy.asarray(im)
-    X = X.transpose(2, 0, 1)
-    X = X.reshape(1, 3, 224, 224)
-    return X
-
-
-X = im2array(im)
-out = sess.run(None, {'data': X.astype(numpy.float32)})
-out = out[0]
-
-print(out[0, :5])
-
-
-

Out:

-
[145.59464   55.06765   60.599792  46.293953  37.982464]
-
-
-

Interpretation

-
res = list(sorted((r, class_names[i]) for i, r in enumerate(out[0])))
-print(res[-5:])
-
-
-

Out:

-
[(205.84174, 'Samoyed, Samoyede'), (212.03664, 'park bench'), (225.50691, 'lakeside, lakeshore'), (232.90251, 'fountain'), (258.10968, 'geyser')]
-
-
-
-
-

Classifying more images#

-

The initial image is rotated; -the answer changes.

-
angles = [a * 2. for a in range(-6, 6)]
-imgs = [(angle, im0.rotate(angle).resize((224, 224)))
-        for angle in angles]
-
-
-def classify(imgs):
-    labels = []
-    for angle, img in imgs:
-        X = im2array(img)
-        probs = sess.run(None, {'data': X.astype(numpy.float32)})[0]
-        pl = list(sorted(
-            ((r, class_names[i]) for i, r in enumerate(probs[0])),
-            reverse=True))
-        labels.append((angle, pl))
-    return labels
-
-
-climgs = classify(imgs)
-for angle, res in climgs:
-    print("angle={} - {}".format(angle, res[:5]))
-
-
-plot_gallery_images([img[1] for img in imgs],
-                    [img[1][0][1][:15] for img in climgs])
-
-
-plot gbegin transfer learning

Out:

-
angle=-12.0 - [(247.06146, 'obelisk'), (238.95372, 'car mirror'), (235.27646, 'flagpole, flagstaff'), (231.51707, 'window screen'), (230.90657, 'picket fence, paling')]
-angle=-10.0 - [(254.24683, 'car mirror'), (251.51357, 'obelisk'), (235.10512, 'groom, bridegroom'), (234.5295, 'picket fence, paling'), (232.13913, 'church, church building')]
-angle=-8.0 - [(235.56952, 'obelisk'), (226.59697, 'car mirror'), (226.46773, 'picket fence, paling'), (221.46794, 'groom, bridegroom'), (220.88506, 'fountain')]
-angle=-6.0 - [(265.50806, 'geyser'), (243.68619, 'obelisk'), (238.92957, 'fountain'), (226.73683, 'pedestal, plinth, footstall'), (226.11952, 'Great Pyrenees')]
-angle=-4.0 - [(287.7449, 'geyser'), (255.25323, 'fountain'), (236.84944, 'obelisk'), (223.02913, 'Great Pyrenees'), (222.80464, 'church, church building')]
-angle=-2.0 - [(267.63528, 'geyser'), (251.48958, 'fountain'), (214.64241, 'obelisk'), (214.56227, 'mobile home, manufactured home'), (213.12424, 'flagpole, flagstaff')]
-angle=0.0 - [(258.10968, 'geyser'), (232.90251, 'fountain'), (225.50691, 'lakeside, lakeshore'), (212.03664, 'park bench'), (205.84174, 'Samoyed, Samoyede')]
-angle=2.0 - [(222.74826, 'geyser'), (213.38457, 'fountain'), (212.24376, 'obelisk'), (198.3714, 'beacon, lighthouse, beacon light, pharos'), (197.43805, 'picket fence, paling')]
-angle=4.0 - [(221.34749, 'geyser'), (209.60362, 'fountain'), (207.0692, 'American egret, great white heron, Egretta albus'), (201.63098, 'obelisk'), (198.75673, 'Great Pyrenees')]
-angle=6.0 - [(230.98735, 'American egret, great white heron, Egretta albus'), (216.6342, 'fountain'), (212.73236, 'groom, bridegroom'), (209.60934, 'flagpole, flagstaff'), (209.46207, 'swimming trunks, bathing trunks')]
-angle=8.0 - [(253.32706, 'American egret, great white heron, Egretta albus'), (222.6997, 'golf ball'), (222.50499, 'groom, bridegroom'), (222.36351, 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita'), (217.73135, 'swimming trunks, bathing trunks')]
-angle=10.0 - [(244.3011, 'solar dish, solar collector, solar furnace'), (239.57332, 'flagpole, flagstaff'), (234.92139, 'picket fence, paling'), (230.62114, 'car mirror'), (221.8794, 'screen, CRT screen')]
-
-array([[<AxesSubplot:>, <AxesSubplot:>, <AxesSubplot:>, <AxesSubplot:>],
-       [<AxesSubplot:>, <AxesSubplot:>, <AxesSubplot:>, <AxesSubplot:>],
-       [<AxesSubplot:>, <AxesSubplot:>, <AxesSubplot:>, <AxesSubplot:>]],
-      dtype=object)
-
-
-
-
-

Transfer learning in a pipeline#

-

The proposed transfer learning consists in -using a PCA to project the probabilities -onto a graph.

-
with open(model_name, 'rb') as f:
-    model_bytes = f.read()
-
-pipe = Pipeline(steps=[
-    ('deep', OnnxTransformer(
-        model_bytes, runtime='onnxruntime1', change_batch_size=0)),
-    ('pca', PCA(2))
-])
-
-X_train = numpy.vstack(
-    [im2array(img) for _, img in imgs]).astype(numpy.float32)
-pipe.fit(X_train)
-
-proj = pipe.transform(X_train)
-print(proj)
-
-
-

Out:

-
[[-676.57574  -203.35437 ]
- [-570.6653   -208.0976  ]
- [-339.81192   -86.34015 ]
- [ -14.556089 -168.44864 ]
- [ 357.22345  -157.61432 ]
- [ 596.3862    -90.20996 ]
- [ 918.8612    -26.33939 ]
- [ 499.87164   128.27281 ]
- [ 306.68573   156.42908 ]
- [-125.91209   119.21805 ]
- [-446.6046    342.45844 ]
- [-504.90253   194.02596 ]]
-
-
-
-

Graph for the PCA#

-
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
-ax.plot(proj[:, 0], proj[:, 1], 'o')
-ax.set_title("Projection of classification probabilities")
-text = ["%1.0f-%s" % (el[0], el[1][0][1]) for el in climgs]
-for label, x, y in zip(text, proj[:, 0], proj[:, 1]):
-    ax.annotate(
-        label, xy=(x, y), xytext=(-10, 10), fontsize=8,
-        textcoords='offset points', ha='right', va='bottom',
-        bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
-        arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
-
-
-Projection of classification probabilities
-
-

Remove one layer at the end#

-

The last layer is often removed before the model is -inserted in a pipeline. Let’s see how to do that. -First, we need the list of outputs for every node.

-
model_onnx = onnx.load(BytesIO(model_bytes))
-outputs = []
-for node in model_onnx.graph.node:
-    print(node.name, node.output)
-    outputs.extend(node.output)
-
-
-

Out:

-
squeezenet0_conv0_fwd ['squeezenet0_conv0_fwd']
-squeezenet0_relu0_fwd ['squeezenet0_relu0_fwd']
-squeezenet0_pool0_fwd ['squeezenet0_pool0_fwd']
-squeezenet0_conv1_fwd ['squeezenet0_conv1_fwd']
-squeezenet0_relu1_fwd ['squeezenet0_relu1_fwd']
-squeezenet0_conv2_fwd ['squeezenet0_conv2_fwd']
-squeezenet0_relu2_fwd ['squeezenet0_relu2_fwd']
-squeezenet0_conv3_fwd ['squeezenet0_conv3_fwd']
-squeezenet0_relu3_fwd ['squeezenet0_relu3_fwd']
-squeezenet0_concat0 ['squeezenet0_concat0']
-squeezenet0_conv4_fwd ['squeezenet0_conv4_fwd']
-squeezenet0_relu4_fwd ['squeezenet0_relu4_fwd']
-squeezenet0_conv5_fwd ['squeezenet0_conv5_fwd']
-squeezenet0_relu5_fwd ['squeezenet0_relu5_fwd']
-squeezenet0_conv6_fwd ['squeezenet0_conv6_fwd']
-squeezenet0_relu6_fwd ['squeezenet0_relu6_fwd']
-squeezenet0_concat1 ['squeezenet0_concat1']
-squeezenet0_pool1_fwd ['squeezenet0_pool1_fwd']
-squeezenet0_conv7_fwd ['squeezenet0_conv7_fwd']
-squeezenet0_relu7_fwd ['squeezenet0_relu7_fwd']
-squeezenet0_conv8_fwd ['squeezenet0_conv8_fwd']
-squeezenet0_relu8_fwd ['squeezenet0_relu8_fwd']
-squeezenet0_conv9_fwd ['squeezenet0_conv9_fwd']
-squeezenet0_relu9_fwd ['squeezenet0_relu9_fwd']
-squeezenet0_concat2 ['squeezenet0_concat2']
-squeezenet0_conv10_fwd ['squeezenet0_conv10_fwd']
-squeezenet0_relu10_fwd ['squeezenet0_relu10_fwd']
-squeezenet0_conv11_fwd ['squeezenet0_conv11_fwd']
-squeezenet0_relu11_fwd ['squeezenet0_relu11_fwd']
-squeezenet0_conv12_fwd ['squeezenet0_conv12_fwd']
-squeezenet0_relu12_fwd ['squeezenet0_relu12_fwd']
-squeezenet0_concat3 ['squeezenet0_concat3']
-squeezenet0_pool2_fwd ['squeezenet0_pool2_fwd']
-squeezenet0_conv13_fwd ['squeezenet0_conv13_fwd']
-squeezenet0_relu13_fwd ['squeezenet0_relu13_fwd']
-squeezenet0_conv14_fwd ['squeezenet0_conv14_fwd']
-squeezenet0_relu14_fwd ['squeezenet0_relu14_fwd']
-squeezenet0_conv15_fwd ['squeezenet0_conv15_fwd']
-squeezenet0_relu15_fwd ['squeezenet0_relu15_fwd']
-squeezenet0_concat4 ['squeezenet0_concat4']
-squeezenet0_conv16_fwd ['squeezenet0_conv16_fwd']
-squeezenet0_relu16_fwd ['squeezenet0_relu16_fwd']
-squeezenet0_conv17_fwd ['squeezenet0_conv17_fwd']
-squeezenet0_relu17_fwd ['squeezenet0_relu17_fwd']
-squeezenet0_conv18_fwd ['squeezenet0_conv18_fwd']
-squeezenet0_relu18_fwd ['squeezenet0_relu18_fwd']
-squeezenet0_concat5 ['squeezenet0_concat5']
-squeezenet0_conv19_fwd ['squeezenet0_conv19_fwd']
-squeezenet0_relu19_fwd ['squeezenet0_relu19_fwd']
-squeezenet0_conv20_fwd ['squeezenet0_conv20_fwd']
-squeezenet0_relu20_fwd ['squeezenet0_relu20_fwd']
-squeezenet0_conv21_fwd ['squeezenet0_conv21_fwd']
-squeezenet0_relu21_fwd ['squeezenet0_relu21_fwd']
-squeezenet0_concat6 ['squeezenet0_concat6']
-squeezenet0_conv22_fwd ['squeezenet0_conv22_fwd']
-squeezenet0_relu22_fwd ['squeezenet0_relu22_fwd']
-squeezenet0_conv23_fwd ['squeezenet0_conv23_fwd']
-squeezenet0_relu23_fwd ['squeezenet0_relu23_fwd']
-squeezenet0_conv24_fwd ['squeezenet0_conv24_fwd']
-squeezenet0_relu24_fwd ['squeezenet0_relu24_fwd']
-squeezenet0_concat7 ['squeezenet0_concat7']
-squeezenet0_dropout0_fwd ['squeezenet0_dropout0_fwd']
-squeezenet0_conv25_fwd ['squeezenet0_conv25_fwd']
-squeezenet0_relu25_fwd ['squeezenet0_relu25_fwd']
-squeezenet0_pool3_fwd ['squeezenet0_pool3_fwd']
-squeezenet0_flatten0_reshape0 ['squeezenet0_flatten0_reshape0']
-
-
-

We select one of the last ones.

-
selected = outputs[-3]
-print("selected", selected)
-
-
-

Out:

-
selected squeezenet0_relu25_fwd
-
-
-
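An alternative sketch: truncate the ONNX graph itself at the selected output with select_model_inputs_outputs (the same helper as in the previous example about stored arrays), instead of letting OnnxTransformer do it.

from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs

truncated = select_model_inputs_outputs(model_onnx, [selected])
print([o.name for o in truncated.graph.output])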

And we tell OnnxTransformer to use that -specific one and to flatten the output -as the dimension is not a matrix.

-
pipe2 = Pipeline(steps=[
-    ('deep', OnnxTransformer(
-        model_bytes, runtime='onnxruntime1', change_batch_size=0,
-        output_name=selected, reshape=True)),
-    ('pca', PCA(2))
-])
-
-try:
-    pipe2.fit(X_train)
-except InvalidArgument as e:
-    print("Unable to fit due to", e)
-
-
-

We check that it is different. -The following values are the shapes of the -PCA components. The number of columns is the number -of dimensions of the outputs of the transferred -neural network (169,000 here, i.e. 1000 channels over a 13×13 feature map for the selected layer).

-
print(pipe.steps[1][1].components_.shape,
-      pipe2.steps[1][1].components_.shape)
-
-
-

Out:

-
(2, 1000) (2, 169000)
-
-
-

Graph again.

-
proj2 = pipe2.transform(X_train)
-
-fig, ax = plt.subplots(1, 1, figsize=(5, 5))
-ax.plot(proj2[:, 0], proj2[:, 1], 'o')
-ax.set_title("Second projection of classification probabilities")
-text = ["%1.0f-%s" % (el[0], el[1][0][1]) for el in climgs]
-for label, x, y in zip(text, proj2[:, 0], proj2[:, 1]):
-    ax.annotate(
-        label, xy=(x, y), xytext=(-10, 10), fontsize=8,
-        textcoords='offset points', ha='right', va='bottom',
-        bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
-        arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
-
-
-Second projection of classification probabilities

Total running time of the script: ( 0 minutes 5.219 seconds)

- -

Gallery generated by Sphinx-Gallery

+Transfer Learning with ONNX - sklearn-onnx 1.14.0 documentation

Transfer Learning with ONNX#

+

Transfer learning is common with deep learning. +A deep learning model is used as preprocessing before +the output is sent to a final classifier or regressor. +It is not quite easy in this case to mix frameworks: +scikit-learn with pytorch +(or skorch), the Keras API for Tensorflow, +tf.keras.wrappers.scikit_learn. Every combination +requires work. ONNX reduces the number of platforms to +support. Once the model is converted into ONNX, +it can be inserted in any scikit-learn pipeline.

+
+

Retrieve and load a model#

+

We download one model from the :epkg:`ONNX Zoo` but the model +could be trained and produced by another converter library.

+
import sys
+from io import BytesIO
+import onnx
+from mlprodict.sklapi import OnnxTransformer
+from sklearn.decomposition import PCA
+from sklearn.pipeline import Pipeline
+from mlinsights.plotting.gallery import plot_gallery_images
+import matplotlib.pyplot as plt
+from skl2onnx.tutorial.imagenet_classes import class_names
+import numpy
+from PIL import Image
+from onnxruntime import InferenceSession
+from onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument
+import os
+import urllib.request
+
+
+def download_file(url, name, min_size):
+    if not os.path.exists(name):
+        print("download '%s'" % url)
+        with urllib.request.urlopen(url) as u:
+            content = u.read()
+        if len(content) < min_size:
+            raise RuntimeError(
+                "Unable to download '{}' due to\n{}".format(
+                    url, content))
+        print("downloaded %d bytes." % len(content))
+        with open(name, "wb") as f:
+            f.write(content)
+    else:
+        print("'%s' already downloaded" % name)
+
+
+model_name = "squeezenet1.1-7.onnx"
+url_name = ("https://github.com/onnx/models/raw/main/vision/"
+            "classification/squeezenet/model")
+url_name += "/" + model_name
+try:
+    download_file(url_name, model_name, 100000)
+except RuntimeError as e:
+    print(e)
+    sys.exit(1)
+
+
+
'squeezenet1.1-7.onnx' already downloaded
+
+
+

Loading the ONNX file and using it on one image.

+
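A sketch of the loading step, assuming the file downloaded above; it creates the inference session used in the rest of the example:

sess = InferenceSession(model_name, providers=["CPUExecutionProvider"])
print(sess.get_inputs()[0])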
+
+
NodeArg(name='data', type='tensor(float)', shape=[1, 3, 224, 224])
+
+
+

The model expects a series of images of size +[3, 224, 224].

+
+
+

Classifying an image#

+
url = ("https://upload.wikimedia.org/wikipedia/commons/d/d2/"
+       "East_Coker_elm%2C_2.jpg")
+img = "East_Coker_elm.jpg"
+download_file(url, img, 100000)
+
+im0 = Image.open(img)
+im = im0.resize((224, 224))
+# im.show()
+
+
+
'East_Coker_elm.jpg' already downloaded
+
+
+

Image to numpy and prediction.

+
def im2array(im):
+    X = numpy.asarray(im)
+    X = X.transpose(2, 0, 1)
+    X = X.reshape(1, 3, 224, 224)
+    return X
+
+
+X = im2array(im)
+out = sess.run(None, {'data': X.astype(numpy.float32)})
+out = out[0]
+
+print(out[0, :5])
+
+
+
[145.59459   55.06765   60.599808  46.293957  37.982475]
+
+
+

Interpretation

+
res = list(sorted((r, class_names[i]) for i, r in enumerate(out[0])))
+print(res[-5:])
+
+
+
[(205.84172, 'Samoyed, Samoyede'), (212.0366, 'park bench'), (225.50687, 'lakeside, lakeshore'), (232.90251, 'fountain'), (258.10965, 'geyser')]
+
+
+
+
+

Classifying more images#

+

The initial image is rotated; +the answer changes.

+
angles = [a * 2. for a in range(-6, 6)]
+imgs = [(angle, im0.rotate(angle).resize((224, 224)))
+        for angle in angles]
+
+
+def classify(imgs):
+    labels = []
+    for angle, img in imgs:
+        X = im2array(img)
+        probs = sess.run(None, {'data': X.astype(numpy.float32)})[0]
+        pl = list(sorted(
+            ((r, class_names[i]) for i, r in enumerate(probs[0])),
+            reverse=True))
+        labels.append((angle, pl))
+    return labels
+
+
+climgs = classify(imgs)
+for angle, res in climgs:
+    print("angle={} - {}".format(angle, res[:5]))
+
+
+plot_gallery_images([img[1] for img in imgs],
+                    [img[1][0][1][:15] for img in climgs])
+
+
+plot gbegin transfer learning
angle=-12.0 - [(247.06139, 'obelisk'), (238.9538, 'car mirror'), (235.27649, 'flagpole, flagstaff'), (231.5171, 'window screen'), (230.90662, 'picket fence, paling')]
+angle=-10.0 - [(254.24677, 'car mirror'), (251.51357, 'obelisk'), (235.10507, 'groom, bridegroom'), (234.52951, 'picket fence, paling'), (232.13913, 'church, church building')]
+angle=-8.0 - [(235.5695, 'obelisk'), (226.59703, 'car mirror'), (226.46768, 'picket fence, paling'), (221.46794, 'groom, bridegroom'), (220.88501, 'fountain')]
+angle=-6.0 - [(265.508, 'geyser'), (243.68616, 'obelisk'), (238.9296, 'fountain'), (226.73679, 'pedestal, plinth, footstall'), (226.11945, 'Great Pyrenees')]
+angle=-4.0 - [(287.74472, 'geyser'), (255.25317, 'fountain'), (236.84944, 'obelisk'), (223.02904, 'Great Pyrenees'), (222.80466, 'church, church building')]
+angle=-2.0 - [(267.6353, 'geyser'), (251.4896, 'fountain'), (214.64238, 'obelisk'), (214.56232, 'mobile home, manufactured home'), (213.12415, 'flagpole, flagstaff')]
+angle=0.0 - [(258.10965, 'geyser'), (232.90251, 'fountain'), (225.50687, 'lakeside, lakeshore'), (212.0366, 'park bench'), (205.84172, 'Samoyed, Samoyede')]
+angle=2.0 - [(222.74826, 'geyser'), (213.38455, 'fountain'), (212.24373, 'obelisk'), (198.37132, 'beacon, lighthouse, beacon light, pharos'), (197.4381, 'picket fence, paling')]
+angle=4.0 - [(221.34743, 'geyser'), (209.60362, 'fountain'), (207.06918, 'American egret, great white heron, Egretta albus'), (201.63097, 'obelisk'), (198.7567, 'Great Pyrenees')]
+angle=6.0 - [(230.9874, 'American egret, great white heron, Egretta albus'), (216.63417, 'fountain'), (212.73239, 'groom, bridegroom'), (209.60934, 'flagpole, flagstaff'), (209.46214, 'swimming trunks, bathing trunks')]
+angle=8.0 - [(253.32697, 'American egret, great white heron, Egretta albus'), (222.6997, 'golf ball'), (222.50497, 'groom, bridegroom'), (222.36351, 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita'), (217.73135, 'swimming trunks, bathing trunks')]
+angle=10.0 - [(244.30106, 'solar dish, solar collector, solar furnace'), (239.57333, 'flagpole, flagstaff'), (234.92139, 'picket fence, paling'), (230.62112, 'car mirror'), (221.87936, 'screen, CRT screen')]
+
+array([[<AxesSubplot: >, <AxesSubplot: >, <AxesSubplot: >,
+        <AxesSubplot: >],
+       [<AxesSubplot: >, <AxesSubplot: >, <AxesSubplot: >,
+        <AxesSubplot: >],
+       [<AxesSubplot: >, <AxesSubplot: >, <AxesSubplot: >,
+        <AxesSubplot: >]], dtype=object)
+
+
+
+
+

Transfer learning in a pipeline#

+

The proposed transfer learning consists in +using a PCA to project the probabilities +onto a graph.

+
with open(model_name, 'rb') as f:
+    model_bytes = f.read()
+
+pipe = Pipeline(steps=[
+    ('deep', OnnxTransformer(
+        model_bytes, runtime='onnxruntime1', change_batch_size=0)),
+    ('pca', PCA(2))
+])
+
+X_train = numpy.vstack(
+    [im2array(img) for _, img in imgs]).astype(numpy.float32)
+pipe.fit(X_train)
+
+proj = pipe.transform(X_train)
+print(proj)
+
+
+
[[-676.5764   -203.35454 ]
+ [-570.6655   -208.09769 ]
+ [-339.81168   -86.33986 ]
+ [ -14.555651 -168.44836 ]
+ [ 357.22372  -157.61395 ]
+ [ 596.38617   -90.210175]
+ [ 918.8613    -26.339687]
+ [ 499.87177   128.2728  ]
+ [ 306.68564   156.42897 ]
+ [-125.91207   119.218216]
+ [-446.60468   342.45862 ]
+ [-504.90256   194.02576 ]]
+
+
+
+

Graph for the PCA#

+
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
+ax.plot(proj[:, 0], proj[:, 1], 'o')
+ax.set_title("Projection of classification probabilities")
+text = ["%1.0f-%s" % (el[0], el[1][0][1]) for el in climgs]
+for label, x, y in zip(text, proj[:, 0], proj[:, 1]):
+    ax.annotate(
+        label, xy=(x, y), xytext=(-10, 10), fontsize=8,
+        textcoords='offset points', ha='right', va='bottom',
+        bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
+        arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
+
+
+Projection of classification probabilities
+
+

Remove one layer at the end#

+

The last layer is often removed before the model is inserted in a pipeline. Let’s see how to do that. First, we need the list of outputs for every node.

+
model_onnx = onnx.load(BytesIO(model_bytes))
+outputs = []
+for node in model_onnx.graph.node:
+    print(node.name, node.output)
+    outputs.extend(node.output)
+
+
+
squeezenet0_conv0_fwd ['squeezenet0_conv0_fwd']
+squeezenet0_relu0_fwd ['squeezenet0_relu0_fwd']
+squeezenet0_pool0_fwd ['squeezenet0_pool0_fwd']
+squeezenet0_conv1_fwd ['squeezenet0_conv1_fwd']
+squeezenet0_relu1_fwd ['squeezenet0_relu1_fwd']
+squeezenet0_conv2_fwd ['squeezenet0_conv2_fwd']
+squeezenet0_relu2_fwd ['squeezenet0_relu2_fwd']
+squeezenet0_conv3_fwd ['squeezenet0_conv3_fwd']
+squeezenet0_relu3_fwd ['squeezenet0_relu3_fwd']
+squeezenet0_concat0 ['squeezenet0_concat0']
+squeezenet0_conv4_fwd ['squeezenet0_conv4_fwd']
+squeezenet0_relu4_fwd ['squeezenet0_relu4_fwd']
+squeezenet0_conv5_fwd ['squeezenet0_conv5_fwd']
+squeezenet0_relu5_fwd ['squeezenet0_relu5_fwd']
+squeezenet0_conv6_fwd ['squeezenet0_conv6_fwd']
+squeezenet0_relu6_fwd ['squeezenet0_relu6_fwd']
+squeezenet0_concat1 ['squeezenet0_concat1']
+squeezenet0_pool1_fwd ['squeezenet0_pool1_fwd']
+squeezenet0_conv7_fwd ['squeezenet0_conv7_fwd']
+squeezenet0_relu7_fwd ['squeezenet0_relu7_fwd']
+squeezenet0_conv8_fwd ['squeezenet0_conv8_fwd']
+squeezenet0_relu8_fwd ['squeezenet0_relu8_fwd']
+squeezenet0_conv9_fwd ['squeezenet0_conv9_fwd']
+squeezenet0_relu9_fwd ['squeezenet0_relu9_fwd']
+squeezenet0_concat2 ['squeezenet0_concat2']
+squeezenet0_conv10_fwd ['squeezenet0_conv10_fwd']
+squeezenet0_relu10_fwd ['squeezenet0_relu10_fwd']
+squeezenet0_conv11_fwd ['squeezenet0_conv11_fwd']
+squeezenet0_relu11_fwd ['squeezenet0_relu11_fwd']
+squeezenet0_conv12_fwd ['squeezenet0_conv12_fwd']
+squeezenet0_relu12_fwd ['squeezenet0_relu12_fwd']
+squeezenet0_concat3 ['squeezenet0_concat3']
+squeezenet0_pool2_fwd ['squeezenet0_pool2_fwd']
+squeezenet0_conv13_fwd ['squeezenet0_conv13_fwd']
+squeezenet0_relu13_fwd ['squeezenet0_relu13_fwd']
+squeezenet0_conv14_fwd ['squeezenet0_conv14_fwd']
+squeezenet0_relu14_fwd ['squeezenet0_relu14_fwd']
+squeezenet0_conv15_fwd ['squeezenet0_conv15_fwd']
+squeezenet0_relu15_fwd ['squeezenet0_relu15_fwd']
+squeezenet0_concat4 ['squeezenet0_concat4']
+squeezenet0_conv16_fwd ['squeezenet0_conv16_fwd']
+squeezenet0_relu16_fwd ['squeezenet0_relu16_fwd']
+squeezenet0_conv17_fwd ['squeezenet0_conv17_fwd']
+squeezenet0_relu17_fwd ['squeezenet0_relu17_fwd']
+squeezenet0_conv18_fwd ['squeezenet0_conv18_fwd']
+squeezenet0_relu18_fwd ['squeezenet0_relu18_fwd']
+squeezenet0_concat5 ['squeezenet0_concat5']
+squeezenet0_conv19_fwd ['squeezenet0_conv19_fwd']
+squeezenet0_relu19_fwd ['squeezenet0_relu19_fwd']
+squeezenet0_conv20_fwd ['squeezenet0_conv20_fwd']
+squeezenet0_relu20_fwd ['squeezenet0_relu20_fwd']
+squeezenet0_conv21_fwd ['squeezenet0_conv21_fwd']
+squeezenet0_relu21_fwd ['squeezenet0_relu21_fwd']
+squeezenet0_concat6 ['squeezenet0_concat6']
+squeezenet0_conv22_fwd ['squeezenet0_conv22_fwd']
+squeezenet0_relu22_fwd ['squeezenet0_relu22_fwd']
+squeezenet0_conv23_fwd ['squeezenet0_conv23_fwd']
+squeezenet0_relu23_fwd ['squeezenet0_relu23_fwd']
+squeezenet0_conv24_fwd ['squeezenet0_conv24_fwd']
+squeezenet0_relu24_fwd ['squeezenet0_relu24_fwd']
+squeezenet0_concat7 ['squeezenet0_concat7']
+squeezenet0_dropout0_fwd ['squeezenet0_dropout0_fwd']
+squeezenet0_conv25_fwd ['squeezenet0_conv25_fwd']
+squeezenet0_relu25_fwd ['squeezenet0_relu25_fwd']
+squeezenet0_pool3_fwd ['squeezenet0_pool3_fwd']
+squeezenet0_flatten0_reshape0 ['squeezenet0_flatten0_reshape0']
+
+
+

We select one of the last ones.

+
selected = outputs[-3]
+print("selected", selected)
+
+
+
selected squeezenet0_relu25_fwd
+
+
+

And we tell OnnxTransformer to use that specific output and to flatten the result, as its shape is not that of a matrix.

+
pipe2 = Pipeline(steps=[
+    ('deep', OnnxTransformer(
+        model_bytes, runtime='onnxruntime1', change_batch_size=0,
+        output_name=selected, reshape=True)),
+    ('pca', PCA(2))
+])
+
+try:
+    pipe2.fit(X_train)
+except InvalidArgument as e:
+    print("Unable to fit due to", e)
+
+
+

We check that it is different. The following values are the shapes of the PCA components. The number of columns is the number of dimensions of the outputs of the transferred neural network.

+
print(pipe.steps[1][1].components_.shape,
+      pipe2.steps[1][1].components_.shape)
+
+
+
(2, 1000) (2, 169000)
+
+
+

Graph again.

+
proj2 = pipe2.transform(X_train)
+
+fig, ax = plt.subplots(1, 1, figsize=(5, 5))
+ax.plot(proj2[:, 0], proj2[:, 1], 'o')
+ax.set_title("Second projection of classification probabilities")
+text = ["%1.0f-%s" % (el[0], el[1][0][1]) for el in climgs]
+for label, x, y in zip(text, proj2[:, 0], proj2[:, 1]):
+    ax.annotate(
+        label, xy=(x, y), xytext=(-10, 10), fontsize=8,
+        textcoords='offset points', ha='right', va='bottom',
+        bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
+        arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
+
+
+Second projection of classification probabilities

Total running time of the script: ( 0 minutes 2.727 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/auto_tutorial/plot_gconverting.html b/auto_tutorial/plot_gconverting.html index 156efbf5e..89862e384 100644 --- a/auto_tutorial/plot_gconverting.html +++ b/auto_tutorial/plot_gconverting.html @@ -1,630 +1,493 @@ -Modify the ONNX graph — sklearn-onnx 1.11.2 documentation

Modify the ONNX graph#

-

This example shows how to change the default ONNX graph such as -renaming the inputs or outputs names.

- -
-

Basic example#

-
import numpy
-from onnxruntime import InferenceSession
-from sklearn.datasets import load_iris
-from sklearn.model_selection import train_test_split
-from sklearn.linear_model import LogisticRegression
-from skl2onnx.common.data_types import FloatTensorType, Int64TensorType
-from skl2onnx import to_onnx
-
-iris = load_iris()
-X, y = iris.data, iris.target
-X = X.astype(numpy.float32)
-X_train, X_test, y_train, y_test = train_test_split(X, y)
-
-clr = LogisticRegression(solver="liblinear")
-clr.fit(X_train, y_train)
-
-
-onx = to_onnx(clr, X, options={'zipmap': False})
-
-sess = InferenceSession(onx.SerializeToString())
-input_names = [i.name for i in sess.get_inputs()]
-output_names = [o.name for o in sess.get_outputs()]
-print("inputs=%r, outputs=%r" % (input_names, output_names))
-print(sess.run(None, {input_names[0]: X_test[:2]}))
-
-
-

Out:

-
inputs=['X'], outputs=['label', 'probabilities']
-[array([0, 2], dtype=int64), array([[8.6287963e-01, 1.3707860e-01, 4.1689775e-05],
-       [5.3355563e-04, 3.1686544e-01, 6.8260098e-01]], dtype=float32)]
-
-
-
-
-

Changes the input names#

-

It is possible to change the input name by using the -parameter initial_types. However, the user must specify the input -types as well.

-
onx = to_onnx(clr, X, options={'zipmap': False},
-              initial_types=[('X56', FloatTensorType([None, X.shape[1]]))])
-
-sess = InferenceSession(onx.SerializeToString())
-input_names = [i.name for i in sess.get_inputs()]
-output_names = [o.name for o in sess.get_outputs()]
-print("inputs=%r, outputs=%r" % (input_names, output_names))
-print(sess.run(None, {input_names[0]: X_test[:2]}))
-
-
-

Out:

-
inputs=['X56'], outputs=['label', 'probabilities']
-[array([0, 2], dtype=int64), array([[8.6287963e-01, 1.3707860e-01, 4.1689775e-05],
-       [5.3355563e-04, 3.1686544e-01, 6.8260098e-01]], dtype=float32)]
-
-
-
-
-

Changes the output names#

-

It is possible to change the input name by using the -parameter final_types.

-
onx = to_onnx(clr, X, options={'zipmap': False},
-              final_types=[('L', Int64TensorType([None])),
-                           ('P', FloatTensorType([None, 3]))])
-
-sess = InferenceSession(onx.SerializeToString())
-input_names = [i.name for i in sess.get_inputs()]
-output_names = [o.name for o in sess.get_outputs()]
-print("inputs=%r, outputs=%r" % (input_names, output_names))
-print(sess.run(None, {input_names[0]: X_test[:2]}))
-
-
-

Out:

-
inputs=['X'], outputs=['L', 'P']
-[array([0, 2], dtype=int64), array([[8.6287963e-01, 1.3707860e-01, 4.1689775e-05],
-       [5.3355563e-04, 3.1686544e-01, 6.8260098e-01]], dtype=float32)]
-
-
-
-
-

Renaming intermediate results#

-

It is possible to rename intermediate results by using a prefix -or by using a function. The result will be post-processed in order -to unique names. It does not impact the graph inputs or outputs.

-
def rename_results(proposed_name, existing_names):
-    result = "_" + proposed_name.upper()
-    while result in existing_names:
-        result += "A"
-    print("changed %r into %r." % (proposed_name, result))
-    return result
-
-
-onx = to_onnx(clr, X, options={'zipmap': False},
-              naming=rename_results)
-
-sess = InferenceSession(onx.SerializeToString())
-input_names = [i.name for i in sess.get_inputs()]
-output_names = [o.name for o in sess.get_outputs()]
-print("inputs=%r, outputs=%r" % (input_names, output_names))
-print(sess.run(None, {input_names[0]: X_test[:2]}))
-
-
-

Out:

-
changed 'SklearnLinearClassifier' into '_SKLEARNLINEARCLASSIFIER'.
-changed 'label' into '_LABEL'.
-changed 'probabilities' into '_PROBABILITIES'.
-changed 'LinearClassifier' into '_LINEARCLASSIFIER'.
-changed 'probability_tensor' into '_PROBABILITY_TENSOR'.
-changed 'Normalizer' into '_NORMALIZER'.
-inputs=['X'], outputs=['label', 'probabilities']
-[array([0, 2], dtype=int64), array([[8.6287963e-01, 1.3707860e-01, 4.1689775e-05],
-       [5.3355563e-04, 3.1686544e-01, 6.8260098e-01]], dtype=float32)]
-
-
-

Total running time of the script: ( 0 minutes 0.103 seconds)

- -

Gallery generated by Sphinx-Gallery

+Modify the ONNX graph - sklearn-onnx 1.14.0 documentation

Modify the ONNX graph#

+

This example shows how to change the default ONNX graph, for example by renaming the input or output names.

+
+

Basic example#

+
import numpy
+from onnxruntime import InferenceSession
+from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
+from sklearn.linear_model import LogisticRegression
+from skl2onnx.common.data_types import FloatTensorType, Int64TensorType
+from skl2onnx import to_onnx
+
+iris = load_iris()
+X, y = iris.data, iris.target
+X = X.astype(numpy.float32)
+X_train, X_test, y_train, y_test = train_test_split(X, y)
+
+clr = LogisticRegression(solver="liblinear")
+clr.fit(X_train, y_train)
+
+
+onx = to_onnx(clr, X, options={'zipmap': False},
+              target_opset=15)
+
+sess = InferenceSession(onx.SerializeToString())
+input_names = [i.name for i in sess.get_inputs()]
+output_names = [o.name for o in sess.get_outputs()]
+print("inputs=%r, outputs=%r" % (input_names, output_names))
+print(sess.run(None, {input_names[0]: X_test[:2]}))
+
+
+
inputs=['X'], outputs=['label', 'probabilities']
+[array([2, 1], dtype=int64), array([[0.00081508, 0.23934081, 0.75984406],
+       [0.01008135, 0.7703535 , 0.21956511]], dtype=float32)]
+
+
+
+
+

Changes the input names#

+

It is possible to change the input name by using the +parameter initial_types. However, the user must specify the input +types as well.

+
onx = to_onnx(clr, X, options={'zipmap': False},
+              initial_types=[('X56', FloatTensorType([None, X.shape[1]]))],
+              target_opset=15)
+
+sess = InferenceSession(onx.SerializeToString())
+input_names = [i.name for i in sess.get_inputs()]
+output_names = [o.name for o in sess.get_outputs()]
+print("inputs=%r, outputs=%r" % (input_names, output_names))
+print(sess.run(None, {input_names[0]: X_test[:2]}))
+
+
+
inputs=['X56'], outputs=['label', 'probabilities']
+[array([2, 1], dtype=int64), array([[0.00081508, 0.23934081, 0.75984406],
+       [0.01008135, 0.7703535 , 0.21956511]], dtype=float32)]
+
+
+
+
+

Changes the output names#

+

It is possible to change the output names by using the parameter final_types.

+
onx = to_onnx(clr, X, options={'zipmap': False},
+              final_types=[('L', Int64TensorType([None])),
+                           ('P', FloatTensorType([None, 3]))],
+              target_opset=15)
+
+sess = InferenceSession(onx.SerializeToString())
+input_names = [i.name for i in sess.get_inputs()]
+output_names = [o.name for o in sess.get_outputs()]
+print("inputs=%r, outputs=%r" % (input_names, output_names))
+print(sess.run(None, {input_names[0]: X_test[:2]}))
+
+
+
inputs=['X'], outputs=['L', 'P']
+[array([2, 1], dtype=int64), array([[0.00081508, 0.23934081, 0.75984406],
+       [0.01008135, 0.7703535 , 0.21956511]], dtype=float32)]
+
+
+
+
+

Renaming intermediate results#

+

It is possible to rename intermediate results by using a prefix or by using a function. The result will be post-processed to ensure unique names. It does not impact the graph inputs or outputs. A prefix-based variant is sketched after the output below.

+
def rename_results(proposed_name, existing_names):
+    result = "_" + proposed_name.upper()
+    while result in existing_names:
+        result += "A"
+    print("changed %r into %r." % (proposed_name, result))
+    return result
+
+
+onx = to_onnx(clr, X, options={'zipmap': False},
+              naming=rename_results, target_opset=15)
+
+sess = InferenceSession(onx.SerializeToString())
+input_names = [i.name for i in sess.get_inputs()]
+output_names = [o.name for o in sess.get_outputs()]
+print("inputs=%r, outputs=%r" % (input_names, output_names))
+print(sess.run(None, {input_names[0]: X_test[:2]}))
+
+
+
changed 'SklearnLinearClassifier' into '_SKLEARNLINEARCLASSIFIER'.
+changed 'label' into '_LABEL'.
+changed 'probabilities' into '_PROBABILITIES'.
+changed 'LinearClassifier' into '_LINEARCLASSIFIER'.
+changed 'probability_tensor' into '_PROBABILITY_TENSOR'.
+changed 'Normalizer' into '_NORMALIZER'.
+inputs=['X'], outputs=['label', 'probabilities']
+[array([2, 1], dtype=int64), array([[0.00081508, 0.23934081, 0.75984406],
+       [0.01008135, 0.7703535 , 0.21956511]], dtype=float32)]
+
+
+
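The paragraph above also mentions renaming with a prefix. A minimal sketch of that variant, assuming the installed skl2onnx accepts a plain string for the naming parameter (used as a prefix) as well as a callable:
+onx = to_onnx(clr, X, options={'zipmap': False},
+              naming="renamed_", target_opset=15)
+
+sess = InferenceSession(onx.SerializeToString())
+# graph inputs and outputs keep their names; only intermediate results are prefixed
+print("inputs=%r, outputs=%r" % (
+    [i.name for i in sess.get_inputs()],
+    [o.name for o in sess.get_outputs()]))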

Total running time of the script: ( 0 minutes 0.063 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/auto_tutorial/plot_gexternal_catboost.html b/auto_tutorial/plot_gexternal_catboost.html new file mode 100644 index 000000000..2adea008d --- /dev/null +++ b/auto_tutorial/plot_gexternal_catboost.html @@ -0,0 +1,549 @@ +Convert a pipeline with a CatBoost classifier - sklearn-onnx 1.14.0 documentation

Convert a pipeline with a CatBoost classifier#

+

sklearn-onnx only converts scikit-learn models into ONNX, but many libraries implement the scikit-learn API so that their models can be included in a scikit-learn pipeline. This example considers a pipeline including a CatBoost model. sklearn-onnx can convert the whole pipeline as long as it knows the converter associated with a CatBoostClassifier. Let’s see how to do it.

+
+

Train a CatBoostClassifier#

+
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+import numpy
+from onnx.helper import get_attribute_value
+from sklearn.datasets import load_iris
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import StandardScaler
+from mlprodict.onnxrt import OnnxInference
+import onnxruntime as rt
+from skl2onnx import convert_sklearn, update_registered_converter
+from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes  # noqa
+from skl2onnx.common.data_types import FloatTensorType, Int64TensorType, guess_tensor_type
+from skl2onnx._parse import _apply_zipmap, _get_sklearn_operator_name
+from catboost import CatBoostClassifier
+from catboost.utils import convert_to_onnx_object
+
+data = load_iris()
+X = data.data[:, :2]
+y = data.target
+
+ind = numpy.arange(X.shape[0])
+numpy.random.shuffle(ind)
+X = X[ind, :].copy()
+y = y[ind].copy()
+
+pipe = Pipeline([('scaler', StandardScaler()),
+                 ('lgbm', CatBoostClassifier(n_estimators=3))])
+pipe.fit(X, y)
+
+
+
Learning rate set to 0.5
+0:      learn: 0.8233591        total: 54.7ms   remaining: 109ms
+1:      learn: 0.6635820        total: 55.8ms   remaining: 27.9ms
+2:      learn: 0.5885989        total: 56.6ms   remaining: 0us
+
+
+
+
Pipeline(steps=[('scaler', StandardScaler()),
+                ('lgbm',
+                 <catboost.core.CatBoostClassifier object at 0x7ff3015be440>)])
+
+
+
+
+

Register the converter for CatBoostClassifier#

+

The model has no converter implemented in sklearn-onnx. We need to register the one coming from CatBoost itself. However, that converter does not follow the sklearn-onnx design and needs to be wrapped.

+
def skl2onnx_parser_castboost_classifier(scope, model, inputs,
+                                         custom_parsers=None):
+    options = scope.get_options(model, dict(zipmap=True))
+    no_zipmap = isinstance(options['zipmap'], bool) and not options['zipmap']
+
+    alias = _get_sklearn_operator_name(type(model))
+    this_operator = scope.declare_local_operator(alias, model)
+    this_operator.inputs = inputs
+
+    label_variable = scope.declare_local_variable('label', Int64TensorType())
+    prob_dtype = guess_tensor_type(inputs[0].type)
+    probability_tensor_variable = scope.declare_local_variable('probabilities', prob_dtype)
+    this_operator.outputs.append(label_variable)
+    this_operator.outputs.append(probability_tensor_variable)
+    probability_tensor = this_operator.outputs
+
+    if no_zipmap:
+        return probability_tensor
+
+    return _apply_zipmap(options['zipmap'], scope, model,
+                         inputs[0].type, probability_tensor)
+
+
+def skl2onnx_convert_catboost(scope, operator, container):
+    """
+    CatBoost returns an ONNX graph with a single node.
+    This function adds it to the main graph.
+    """
+    onx = convert_to_onnx_object(operator.raw_operator)
+    opsets = {d.domain: d.version for d in onx.opset_import}
+    if '' in opsets and opsets[''] >= container.target_opset:
+        raise RuntimeError(
+            "CatBoost uses an opset more recent than the target one.")
+    if len(onx.graph.initializer) > 0 or len(onx.graph.sparse_initializer) > 0:
+        raise NotImplementedError(
+            "CatBoost returned a model with initializers. This is not implemented yet.")
+    if (len(onx.graph.node) not in (1, 2) or not onx.graph.node[0].op_type.startswith("TreeEnsemble") or
+            (len(onx.graph.node) == 2 and onx.graph.node[1].op_type != "ZipMap")):
+        types = ", ".join(map(lambda n: n.op_type, onx.graph.node))
+        raise NotImplementedError(
+            f"CatBoost returns {len(onx.graph.node)} != 1 (types={types}). "
+            f"This option is not implemented yet.")
+    node = onx.graph.node[0]
+    atts = {}
+    for att in node.attribute:
+        atts[att.name] = get_attribute_value(att)
+    container.add_node(
+        node.op_type, [operator.inputs[0].full_name],
+        [operator.outputs[0].full_name, operator.outputs[1].full_name],
+        op_domain=node.domain, op_version=opsets.get(node.domain, None),
+        **atts)
+
+
+update_registered_converter(
+    CatBoostClassifier,
+    'CatBoostCatBoostClassifier',
+    calculate_linear_classifier_output_shapes,
+    skl2onnx_convert_catboost,
+    parser=skl2onnx_parser_castboost_classifier,
+    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})
+
+
+
+
+

Convert#

+
model_onnx = convert_sklearn(
+    pipe, 'pipeline_catboost',
+    [('input', FloatTensorType([None, 2]))],
+    target_opset={'': 12, 'ai.onnx.ml': 2})
+
+# And save.
+with open("pipeline_catboost.onnx", "wb") as f:
+    f.write(model_onnx.SerializeToString())
+
+
+
+
+

Compare the predictions#

+

Predictions with CatBoost.

+
print("predict", pipe.predict(X[:5]))
+print("predict_proba", pipe.predict_proba(X[:1]))
+
+
+
predict [[2]
+ [0]
+ [1]
+ [1]
+ [0]]
+predict_proba [[0.09983726 0.22940648 0.67075626]]
+
+
+

Predictions with onnxruntime.

+
sess = rt.InferenceSession("pipeline_catboost.onnx")
+
+pred_onx = sess.run(None, {"input": X[:5].astype(numpy.float32)})
+print("predict", pred_onx[0])
+print("predict_proba", pred_onx[1][:1])
+
+
+
predict [2 0 1 1 0]
+predict_proba [{0: 0.09983726590871811, 1: 0.22940650582313538, 2: 0.6707562804222107}]
+
+
+
+
+

Final graph#

+
oinf = OnnxInference(model_onnx)
+ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot gexternal catboost

Total running time of the script: ( 0 minutes 1.134 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+ +
+ +
+
+ + + + + \ No newline at end of file diff --git a/auto_tutorial/plot_gexternal_lightgbm.html b/auto_tutorial/plot_gexternal_lightgbm.html index 9446909ee..34e04fc9d 100644 --- a/auto_tutorial/plot_gexternal_lightgbm.html +++ b/auto_tutorial/plot_gexternal_lightgbm.html @@ -1,622 +1,479 @@ -Convert a pipeline with a LightGBM classifier — sklearn-onnx 1.11.2 documentation

Convert a pipeline with a LightGBM classifier#

-

sklearn-onnx only converts scikit-learn models into ONNX -but many libraries implement scikit-learn API so that their models -can be included in a scikit-learn pipeline. This example considers -a pipeline including a LightGBM model. sklearn-onnx can convert -the whole pipeline as long as it knows the converter associated to -a LGBMClassifier. Let’s see how to do it.

- -
-

Train a LightGBM classifier#

-
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
-from mlprodict.onnxrt import OnnxInference
-import onnxruntime as rt
-from skl2onnx import convert_sklearn, update_registered_converter
-from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes  # noqa
-from onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm  # noqa
-from skl2onnx.common.data_types import FloatTensorType
-import numpy
-from sklearn.datasets import load_iris
-from sklearn.pipeline import Pipeline
-from sklearn.preprocessing import StandardScaler
-from lightgbm import LGBMClassifier
-
-data = load_iris()
-X = data.data[:, :2]
-y = data.target
-
-ind = numpy.arange(X.shape[0])
-numpy.random.shuffle(ind)
-X = X[ind, :].copy()
-y = y[ind].copy()
-
-pipe = Pipeline([('scaler', StandardScaler()),
-                 ('lgbm', LGBMClassifier(n_estimators=3))])
-pipe.fit(X, y)
-
-
-
-
Pipeline(steps=[('scaler', StandardScaler()),
-                ('lgbm', LGBMClassifier(n_estimators=3))])
-
-
-
-
-

Register the converter for LGBMClassifier#

-

The converter is implemented in onnxmltools: -onnxmltools…LightGbm.py. -and the shape calculator: -onnxmltools…Classifier.py.

-
update_registered_converter(
-    LGBMClassifier, 'LightGbmLGBMClassifier',
-    calculate_linear_classifier_output_shapes, convert_lightgbm,
-    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})
-
-
-
-
-

Convert again#

-
model_onnx = convert_sklearn(
-    pipe, 'pipeline_lightgbm',
-    [('input', FloatTensorType([None, 2]))],
-    target_opset={'': 12, 'ai.onnx.ml': 2})
-
-# And save.
-with open("pipeline_lightgbm.onnx", "wb") as f:
-    f.write(model_onnx.SerializeToString())
-
-
-
-
-

Compare the predictions#

-

Predictions with LightGbm.

-
print("predict", pipe.predict(X[:5]))
-print("predict_proba", pipe.predict_proba(X[:1]))
-
-
-

Out:

-
predict [1 2 0 0 0]
-predict_proba [[0.25335584 0.45934348 0.28730068]]
-
-
-

Predictions with onnxruntime.

-
sess = rt.InferenceSession("pipeline_lightgbm.onnx")
-
-pred_onx = sess.run(None, {"input": X[:5].astype(numpy.float32)})
-print("predict", pred_onx[0])
-print("predict_proba", pred_onx[1][:1])
-
-
-

Out:

-
predict [1 2 0 0 0]
-predict_proba [{0: 0.25335583090782166, 1: 0.45934349298477173, 2: 0.287300705909729}]
-
-
-
-
-

Final graph#

-
oinf = OnnxInference(model_onnx)
-ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot gexternal lightgbm

Total running time of the script: ( 0 minutes 0.510 seconds)

- -

Gallery generated by Sphinx-Gallery

-
-
- - -
- - - - - -
- -
-
- - - - - - -
-
+Convert a pipeline with a LightGBM classifier - sklearn-onnx 1.14.0 documentation

Convert a pipeline with a LightGBM classifier#

+

sklearn-onnx only converts scikit-learn models into ONNX, but many libraries implement the scikit-learn API so that their models can be included in a scikit-learn pipeline. This example considers a pipeline including a LightGBM model. sklearn-onnx can convert the whole pipeline as long as it knows the converter associated with a LGBMClassifier. Let’s see how to do it.

+
+

Train a LightGBM classifier#

+
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+from mlprodict.onnxrt import OnnxInference
+import onnxruntime as rt
+from skl2onnx import convert_sklearn, update_registered_converter
+from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes  # noqa
+from onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm  # noqa
+from skl2onnx.common.data_types import FloatTensorType
+import numpy
+from sklearn.datasets import load_iris
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import StandardScaler
+from lightgbm import LGBMClassifier
+
+data = load_iris()
+X = data.data[:, :2]
+y = data.target
+
+ind = numpy.arange(X.shape[0])
+numpy.random.shuffle(ind)
+X = X[ind, :].copy()
+y = y[ind].copy()
+
+pipe = Pipeline([('scaler', StandardScaler()),
+                 ('lgbm', LGBMClassifier(n_estimators=3))])
+pipe.fit(X, y)
+
+
+
+
Pipeline(steps=[('scaler', StandardScaler()),
+                ('lgbm', LGBMClassifier(n_estimators=3))])
+
+
+
+
+

Register the converter for LGBMClassifier#

+

The converter is implemented in onnxmltools: onnxmltools…LightGbm.py, and the shape calculator in onnxmltools…Classifier.py.

+
update_registered_converter(
+    LGBMClassifier, 'LightGbmLGBMClassifier',
+    calculate_linear_classifier_output_shapes, convert_lightgbm,
+    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})
+
+
+
+
+

Convert again#

+
model_onnx = convert_sklearn(
+    pipe, 'pipeline_lightgbm',
+    [('input', FloatTensorType([None, 2]))],
+    target_opset={'': 12, 'ai.onnx.ml': 2})
+
+# And save.
+with open("pipeline_lightgbm.onnx", "wb") as f:
+    f.write(model_onnx.SerializeToString())
+
+
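The zipmap option registered above can also be turned off at conversion time so that probabilities come back as a plain float matrix instead of the list of dictionaries shown further down. A minimal sketch, assuming this version of skl2onnx accepts options keyed by the model class (the output file name is illustrative):
+model_onnx_nozipmap = convert_sklearn(
+    pipe, 'pipeline_lightgbm_nozipmap',
+    [('input', FloatTensorType([None, 2]))],
+    options={LGBMClassifier: {'zipmap': False}},
+    target_opset={'': 12, 'ai.onnx.ml': 2})
+
+with open("pipeline_lightgbm_nozipmap.onnx", "wb") as f:
+    f.write(model_onnx_nozipmap.SerializeToString())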
+
+
+

Compare the predictions#

+

Predictions with LightGbm.

+
print("predict", pipe.predict(X[:5]))
+print("predict_proba", pipe.predict_proba(X[:1]))
+
+
+
predict [2 1 1 1 2]
+predict_proba [[0.27391998 0.27510369 0.45097633]]
+
+
+

Predictions with onnxruntime.

+
sess = rt.InferenceSession("pipeline_lightgbm.onnx")
+
+pred_onx = sess.run(None, {"input": X[:5].astype(numpy.float32)})
+print("predict", pred_onx[0])
+print("predict_proba", pred_onx[1][:1])
+
+
+
predict [2 1 1 1 2]
+predict_proba [{0: 0.2739199697971344, 1: 0.27510371804237366, 2: 0.4509763717651367}]
+
+
+
+
+

Final graph#

+
oinf = OnnxInference(model_onnx)
+ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot gexternal lightgbm

Total running time of the script: ( 0 minutes 0.542 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/auto_tutorial/plot_gexternal_lightgbm_reg.html b/auto_tutorial/plot_gexternal_lightgbm_reg.html index dfcb769d5..18de1479a 100644 --- a/auto_tutorial/plot_gexternal_lightgbm_reg.html +++ b/auto_tutorial/plot_gexternal_lightgbm_reg.html @@ -1,729 +1,581 @@ -Convert a pipeline with a LightGBM regressor — sklearn-onnx 1.11.2 documentation

Convert a pipeline with a LightGBM regressor#

-

The discrepancies observed when using float and TreeEnsemble operator -(see Issues when switching to float) -explains why the converter for LGBMRegressor may introduce significant -discrepancies even when it is used with float tensors.

-

Library lightgbm is implemented with double. A random forest regressor -with multiple trees computes its prediction by adding the prediction of -every tree. After being converting into ONNX, this summation becomes -\left[\sum\right]_{i=1}^F float(T_i(x)), -where F is the number of trees in the forest, -T_i(x) the output of tree i and \left[\sum\right] -a float addition. The discrepancy can be expressed as -D(x) = |\left[\sum\right]_{i=1}^F float(T_i(x)) - -\sum_{i=1}^F T_i(x)|. -This grows with the number of trees in the forest.

-

To reduce the impact, an option was added to split the node -TreeEnsembleRegressor into multiple ones and to do a summation -with double this time. If we assume the node if split into a nodes, -the discrepancies then become -D'(x) = |\sum_{k=1}^a \left[\sum\right]_{i=1}^{F/a} -float(T_{ak + i}(x)) - \sum_{i=1}^F T_i(x)|.

- -
-

Train a LGBMRegressor#

-
import packaging.version as pv
-import warnings
-import timeit
-import numpy
-from pandas import DataFrame
-import matplotlib.pyplot as plt
-from tqdm import tqdm
-from lightgbm import LGBMRegressor
-from onnxruntime import InferenceSession
-from skl2onnx import to_onnx, update_registered_converter
-from skl2onnx.common.shape_calculator import calculate_linear_regressor_output_shapes  # noqa
-from onnxmltools import __version__ as oml_version
-from onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm  # noqa
-
-
-N = 1000
-X = numpy.random.randn(N, 20)
-y = (numpy.random.randn(N) +
-     numpy.random.randn(N) * 100 * numpy.random.randint(0, 1, 1000))
-
-reg = LGBMRegressor(n_estimators=1000)
-reg.fit(X, y)
-
-
-
-
LGBMRegressor(n_estimators=1000)
-
-
-
-
-

Register the converter for LGBMClassifier#

-

The converter is implemented in onnxmltools: -onnxmltools…LightGbm.py. -and the shape calculator: -onnxmltools…Regressor.py.

-
def skl2onnx_convert_lightgbm(scope, operator, container):
-    options = scope.get_options(operator.raw_operator)
-    if 'split' in options:
-        if pv.Version(oml_version) < pv.Version('1.9.2'):
-            warnings.warn(
-                "Option split was released in version 1.9.2 but %s is "
-                "installed. It will be ignored." % oml_version)
-        operator.split = options['split']
-    else:
-        operator.split = None
-    convert_lightgbm(scope, operator, container)
-
-
-update_registered_converter(
-    LGBMRegressor, 'LightGbmLGBMRegressor',
-    calculate_linear_regressor_output_shapes,
-    skl2onnx_convert_lightgbm,
-    options={'split': None})
-
-
-
-
-

Convert#

-

We convert the same model following the two scenarios, one single -TreeEnsembleRegressor node, or more. split parameter is the number of -trees per node TreeEnsembleRegressor.

-
model_onnx = to_onnx(reg, X[:1].astype(numpy.float32),
-                     target_opset={'': 14, 'ai.onnx.ml': 2})
-model_onnx_split = to_onnx(reg, X[:1].astype(numpy.float32),
-                           target_opset={'': 14, 'ai.onnx.ml': 2},
-                           options={'split': 100})
-
-
-
-
-

Discrepancies#

-
sess = InferenceSession(model_onnx.SerializeToString())
-sess_split = InferenceSession(model_onnx_split.SerializeToString())
-
-X32 = X.astype(numpy.float32)
-expected = reg.predict(X32)
-got = sess.run(None, {'X': X32})[0].ravel()
-got_split = sess_split.run(None, {'X': X32})[0].ravel()
-
-disp = numpy.abs(got - expected).sum()
-disp_split = numpy.abs(got_split - expected).sum()
-
-print("sum of discrepancies 1 node", disp)
-print("sum of discrepancies split node",
-      disp_split, "ratio:", disp / disp_split)
-
-
-

Out:

-
sum of discrepancies 1 node 0.00011677806003327501
-sum of discrepancies split node 4.402241789117978e-05 ratio: 2.6526952772549177
-
-
-

The sum of the discrepancies were reduced 4, 5 times. -The maximum is much better too.

-
disc = numpy.abs(got - expected).max()
-disc_split = numpy.abs(got_split - expected).max()
-
-print("max discrepancies 1 node", disc)
-print("max discrepancies split node", disc_split, "ratio:", disc / disc_split)
-
-
-

Out:

-
max discrepancies 1 node 1.0208151959290035e-06
-max discrepancies split node 3.5547470744745624e-07 ratio: 2.8716957199546838
-
-
-
-
-

Processing time#

-

The processing time is slower but not much.

-
print("processing time no split",
-      timeit.timeit(
-        lambda: sess.run(None, {'X': X32})[0], number=150))
-print("processing time split",
-      timeit.timeit(
-        lambda: sess_split.run(None, {'X': X32})[0], number=150))
-
-
-

Out:

-
processing time no split 0.7822519999999997
-processing time split 0.8093972000000065
-
-
-
-
-

Split influence#

-

Let’s see how the sum of the discrepancies moves against -the parameter split.

-
res = []
-for i in tqdm(list(range(20, 170, 20)) + [200, 300, 400, 500]):
-    model_onnx_split = to_onnx(reg, X[:1].astype(numpy.float32),
-                               target_opset={'': 14, 'ai.onnx.ml': 2},
-                               options={'split': i})
-    sess_split = InferenceSession(model_onnx_split.SerializeToString())
-    got_split = sess_split.run(None, {'X': X32})[0].ravel()
-    disc_split = numpy.abs(got_split - expected).max()
-    res.append(dict(split=i, disc=disc_split))
-
-df = DataFrame(res).set_index('split')
-df["baseline"] = disc
-print(df)
-
-
-

Out:

-
  0%|                                                           | 0/12 [00:00<?, ?it/s]
-  8%|####2                                              | 1/12 [00:02<00:24,  2.25s/it]
- 17%|########5                                          | 2/12 [00:04<00:22,  2.25s/it]
- 25%|############7                                      | 3/12 [00:06<00:19,  2.15s/it]
- 33%|#################                                  | 4/12 [00:08<00:17,  2.17s/it]
- 42%|#####################2                             | 5/12 [00:10<00:14,  2.11s/it]
- 50%|#########################5                         | 6/12 [00:12<00:12,  2.14s/it]
- 58%|#############################7                     | 7/12 [00:14<00:10,  2.09s/it]
- 67%|##################################                 | 8/12 [00:17<00:08,  2.10s/it]
- 75%|######################################2            | 9/12 [00:18<00:06,  2.05s/it]
- 83%|#########################################6        | 10/12 [00:21<00:04,  2.08s/it]
- 92%|#############################################8    | 11/12 [00:23<00:02,  2.04s/it]
-100%|##################################################| 12/12 [00:25<00:00,  2.05s/it]
-100%|##################################################| 12/12 [00:25<00:00,  2.10s/it]
-               disc  baseline
-split
-20     1.730885e-07  0.000001
-40     1.790506e-07  0.000001
-60     2.863737e-07  0.000001
-80     2.863737e-07  0.000001
-100    3.554747e-07  0.000001
-120    4.524478e-07  0.000001
-140    4.174692e-07  0.000001
-160    3.860974e-07  0.000001
-200    3.477843e-07  0.000001
-300    5.396620e-07  0.000001
-400    8.225882e-07  0.000001
-500    6.640952e-07  0.000001
-
-
-

Graph.

-
ax = df.plot(title="Sum of discrepancies against split\n"
-                   "split = number of tree per node")
-
-# plt.show()
-
-
-Sum of discrepancies against split split = number of tree per node

Total running time of the script: ( 0 minutes 31.683 seconds)

- -

Gallery generated by Sphinx-Gallery

-
-
- - -
- - - - - -
- -
-
- - - - - - -
-
+Convert a pipeline with a LightGBM regressor - sklearn-onnx 1.14.0 documentation

Convert a pipeline with a LightGBM regressor#

+

The discrepancies observed when using float and the TreeEnsemble operator (see Issues when switching to float) explain why the converter for LGBMRegressor may introduce significant discrepancies even when it is used with float tensors.

+

Library lightgbm is implemented with double. A random forest regressor with multiple trees computes its prediction by adding the predictions of every tree. After being converted into ONNX, this summation becomes \left[\sum\right]_{i=1}^F float(T_i(x)), where F is the number of trees in the forest, T_i(x) the output of tree i and \left[\sum\right] a float addition. The discrepancy can be expressed as D(x) = \left|\left[\sum\right]_{i=1}^F float(T_i(x)) - \sum_{i=1}^F T_i(x)\right|. It grows with the number of trees in the forest.

+

To reduce the impact, an option was added to split the node TreeEnsembleRegressor into multiple ones and to do the summation with double this time. If we assume the node is split into a nodes, the discrepancies then become D'(x) = \left|\sum_{k=1}^a \left[\sum\right]_{i=1}^{F/a} float(T_{ak + i}(x)) - \sum_{i=1}^F T_i(x)\right|. A small numpy sketch of this float-summation effect is given right after this introduction.

+
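The effect can be reproduced with plain numpy (this snippet is an editorial illustration, not part of the original script): summing many small values sequentially in float32 drifts away from the same summation done in float64, and the drift tends to grow with the number of terms, just as the discrepancy grows with the number of trees.
+import numpy
+
+values = numpy.random.randn(10000) / 1000   # stand-ins for individual tree outputs
+sum64 = float(values.sum())                  # summation in double precision
+sum32 = numpy.float32(0.0)
+for v in values.astype(numpy.float32):
+    sum32 += v                               # sequential float32 additions, as in a single TreeEnsemble node
+print("absolute difference:", abs(float(sum32) - sum64))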
+

Train a LGBMRegressor#

+
import packaging.version as pv
+import warnings
+import timeit
+import numpy
+from pandas import DataFrame
+import matplotlib.pyplot as plt
+from tqdm import tqdm
+from lightgbm import LGBMRegressor
+from onnxruntime import InferenceSession
+from skl2onnx import to_onnx, update_registered_converter
+from skl2onnx.common.shape_calculator import calculate_linear_regressor_output_shapes  # noqa
+from onnxmltools import __version__ as oml_version
+from onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm  # noqa
+
+
+N = 1000
+X = numpy.random.randn(N, 20)
+y = (numpy.random.randn(N) +
+     numpy.random.randn(N) * 100 * numpy.random.randint(0, 1, 1000))
+
+reg = LGBMRegressor(n_estimators=1000)
+reg.fit(X, y)
+
+
+
+
LGBMRegressor(n_estimators=1000)
+
+
+
+
+

Register the converter for LGBMRegressor#

+

The converter is implemented in onnxmltools: onnxmltools…LightGbm.py, and the shape calculator in onnxmltools…Regressor.py.

+
def skl2onnx_convert_lightgbm(scope, operator, container):
+    options = scope.get_options(operator.raw_operator)
+    if 'split' in options:
+        if pv.Version(oml_version) < pv.Version('1.9.2'):
+            warnings.warn(
+                "Option split was released in version 1.9.2 but %s is "
+                "installed. It will be ignored." % oml_version)
+        operator.split = options['split']
+    else:
+        operator.split = None
+    convert_lightgbm(scope, operator, container)
+
+
+update_registered_converter(
+    LGBMRegressor, 'LightGbmLGBMRegressor',
+    calculate_linear_regressor_output_shapes,
+    skl2onnx_convert_lightgbm,
+    options={'split': None})
+
+
+
+
+

Convert#

+

We convert the same model following the two scenarios: a single TreeEnsembleRegressor node, or several. The split parameter is the number of trees per TreeEnsembleRegressor node.

+
model_onnx = to_onnx(reg, X[:1].astype(numpy.float32),
+                     target_opset={'': 14, 'ai.onnx.ml': 2})
+model_onnx_split = to_onnx(reg, X[:1].astype(numpy.float32),
+                           target_opset={'': 14, 'ai.onnx.ml': 2},
+                           options={'split': 100})
+
+
+
+
+

Discrepancies#

+
sess = InferenceSession(model_onnx.SerializeToString())
+sess_split = InferenceSession(model_onnx_split.SerializeToString())
+
+X32 = X.astype(numpy.float32)
+expected = reg.predict(X32)
+got = sess.run(None, {'X': X32})[0].ravel()
+got_split = sess_split.run(None, {'X': X32})[0].ravel()
+
+disp = numpy.abs(got - expected).sum()
+disp_split = numpy.abs(got_split - expected).sum()
+
+print("sum of discrepancies 1 node", disp)
+print("sum of discrepancies split node",
+      disp_split, "ratio:", disp / disp_split)
+
+
+
sum of discrepancies 1 node 0.0002170055282193992
+sum of discrepancies split node 4.3252795611109084e-05 ratio: 5.01714456033134
+
+
+

The sum of the discrepancies was reduced by a factor of 4 to 5. The maximum is much better too.

+
disc = numpy.abs(got - expected).max()
+disc_split = numpy.abs(got_split - expected).max()
+
+print("max discrepancies 1 node", disc)
+print("max discrepancies split node", disc_split, "ratio:", disc / disc_split)
+
+
+
max discrepancies 1 node 1.7663594098493718e-06
+max discrepancies split node 3.081887920419746e-07 ratio: 5.731420010915899
+
+
+
+
+

Processing time#

+

The processing time is slower but not much.

+
print("processing time no split",
+      timeit.timeit(
+          lambda: sess.run(None, {'X': X32})[0], number=150))
+print("processing time split",
+      timeit.timeit(
+          lambda: sess_split.run(None, {'X': X32})[0], number=150))
+
+
+
processing time no split 2.637800576000245
+processing time split 2.7089149429998542
+
+
+
+
+

Split influence#

+

Let’s see how the sum of the discrepancies moves against +the parameter split.

+
res = []
+for i in tqdm(list(range(20, 170, 20)) + [200, 300, 400, 500]):
+    model_onnx_split = to_onnx(reg, X[:1].astype(numpy.float32),
+                               target_opset={'': 14, 'ai.onnx.ml': 2},
+                               options={'split': i})
+    sess_split = InferenceSession(model_onnx_split.SerializeToString())
+    got_split = sess_split.run(None, {'X': X32})[0].ravel()
+    disc_split = numpy.abs(got_split - expected).max()
+    res.append(dict(split=i, disc=disc_split))
+
+df = DataFrame(res).set_index('split')
+df["baseline"] = disc
+print(df)
+
+
+
  0%|          | 0/12 [00:00<?, ?it/s]
+  8%|8         | 1/12 [00:02<00:27,  2.51s/it]
+ 17%|#6        | 2/12 [00:04<00:22,  2.26s/it]
+ 25%|##5       | 3/12 [00:06<00:19,  2.17s/it]
+ 33%|###3      | 4/12 [00:08<00:17,  2.13s/it]
+ 42%|####1     | 5/12 [00:11<00:16,  2.31s/it]
+ 50%|#####     | 6/12 [00:13<00:14,  2.35s/it]
+ 58%|#####8    | 7/12 [00:16<00:11,  2.31s/it]
+ 67%|######6   | 8/12 [00:19<00:10,  2.54s/it]
+ 75%|#######5  | 9/12 [00:21<00:07,  2.57s/it]
+ 83%|########3 | 10/12 [00:24<00:05,  2.73s/it]
+ 92%|#########1| 11/12 [00:27<00:02,  2.61s/it]
+100%|##########| 12/12 [00:29<00:00,  2.47s/it]
+100%|##########| 12/12 [00:29<00:00,  2.44s/it]
+               disc  baseline
+split
+20     2.396380e-07  0.000002
+40     2.127739e-07  0.000002
+60     2.453829e-07  0.000002
+80     3.785900e-07  0.000002
+100    3.081888e-07  0.000002
+120    3.457641e-07  0.000002
+140    3.081888e-07  0.000002
+160    4.130784e-07  0.000002
+200    4.680156e-07  0.000002
+300    6.390029e-07  0.000002
+400    1.001937e-06  0.000002
+500    1.001937e-06  0.000002
+
+
+

Graph.

+
_, ax = plt.subplots(1, 1)
+df.plot(title="Sum of discrepancies against split\n"
+              "split = number of tree per node",
+        ax=ax)
+
+# plt.show()
+
+
+Sum of discrepancies against split split = number of tree per node

Total running time of the script: ( 0 minutes 39.867 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/auto_tutorial/plot_gexternal_xgboost.html b/auto_tutorial/plot_gexternal_xgboost.html index c51eb7ef6..d45231dfb 100644 --- a/auto_tutorial/plot_gexternal_xgboost.html +++ b/auto_tutorial/plot_gexternal_xgboost.html @@ -1,737 +1,574 @@ -Convert a pipeline with a XGBoost model — sklearn-onnx 1.11.2 documentation

Convert a pipeline with a XGBoost model#

-

sklearn-onnx only converts scikit-learn models -into ONNX but many libraries implement scikit-learn -API so that their models can be included in a scikit-learn -pipeline. This example considers a pipeline including a XGBoost -model. sklearn-onnx can convert the whole pipeline as long as -it knows the converter associated to a XGBClassifier. Let’s see -how to do it.

- -
-

Train a XGBoost classifier#

-
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
-from mlprodict.onnxrt import OnnxInference
-import numpy
-import onnxruntime as rt
-from sklearn.datasets import load_iris, load_diabetes, make_classification
-from sklearn.model_selection import train_test_split
-from sklearn.pipeline import Pipeline
-from sklearn.preprocessing import StandardScaler
-from xgboost import XGBClassifier, XGBRegressor, DMatrix, train as train_xgb
-from skl2onnx.common.data_types import FloatTensorType
-from skl2onnx import convert_sklearn, to_onnx, update_registered_converter
-from skl2onnx.common.shape_calculator import (
-    calculate_linear_classifier_output_shapes,
-    calculate_linear_regressor_output_shapes)
-from onnxmltools.convert.xgboost.operator_converters.XGBoost import (
-    convert_xgboost)
-from onnxmltools.convert import convert_xgboost as convert_xgboost_booster
-
-
-data = load_iris()
-X = data.data[:, :2]
-y = data.target
-
-ind = numpy.arange(X.shape[0])
-numpy.random.shuffle(ind)
-X = X[ind, :].copy()
-y = y[ind].copy()
-
-pipe = Pipeline([('scaler', StandardScaler()),
-                 ('xgb', XGBClassifier(n_estimators=3))])
-pipe.fit(X, y)
-
-# The conversion fails but it is expected.
-
-try:
-    convert_sklearn(pipe, 'pipeline_xgboost',
-                    [('input', FloatTensorType([None, 2]))],
-                    target_opset={'': 12, 'ai.onnx.ml': 2})
-except Exception as e:
-    print(e)
-
-# The error message tells no converter was found
-# for :epkg:`XGBoost` models. By default, :epkg:`sklearn-onnx`
-# only handles models from :epkg:`scikit-learn` but it can
-# be extended to every model following :epkg:`scikit-learn`
-# API as long as the module knows there exists a converter
-# for every model used in a pipeline. That's why
-# we need to register a converter.
-
-
-

Out:

-
D:\Program Files\Python\Python39\lib\site-packages\xgboost\sklearn.py:1224: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].
-  warnings.warn(label_encoder_deprecation_msg, UserWarning)
-[14:44:07] WARNING: C:/Users/Administrator/workspace/xgboost-win64_release_1.5.1/src/learner.cc:1115: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'multi:softprob' was changed from 'merror' to 'mlogloss'. Explicitly set eval_metric if you'd like to restore the old behavior.
-
-
-
-
-

Register the converter for XGBClassifier#

-

The converter is implemented in onnxmltools: -onnxmltools…XGBoost.py. -and the shape calculator: -onnxmltools…Classifier.py.

-
update_registered_converter(
-    XGBClassifier, 'XGBoostXGBClassifier',
-    calculate_linear_classifier_output_shapes, convert_xgboost,
-    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})
-
-
-
-
-

Convert again#

-
model_onnx = convert_sklearn(
-    pipe, 'pipeline_xgboost',
-    [('input', FloatTensorType([None, 2]))],
-    target_opset={'': 12, 'ai.onnx.ml': 2})
-
-# And save.
-with open("pipeline_xgboost.onnx", "wb") as f:
-    f.write(model_onnx.SerializeToString())
-
-
-
-
-

Compare the predictions#

-

Predictions with XGBoost.

-
print("predict", pipe.predict(X[:5]))
-print("predict_proba", pipe.predict_proba(X[:1]))
-
-
-

Out:

-
predict [1 0 2 2 0]
-predict_proba [[0.18270978 0.63138163 0.18590865]]
-
-
-

Predictions with onnxruntime.

-
sess = rt.InferenceSession("pipeline_xgboost.onnx")
-pred_onx = sess.run(None, {"input": X[:5].astype(numpy.float32)})
-print("predict", pred_onx[0])
-print("predict_proba", pred_onx[1][:1])
-
-
-

Out:

-
predict [1 0 2 2 0]
-predict_proba [{0: 0.18270978331565857, 1: 0.631381630897522, 2: 0.18590864539146423}]
-
-
-
-
-

Final graph#

-
oinf = OnnxInference(model_onnx)
-ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot gexternal xgboost
-
-

Same example with XGBRegressor#

-
update_registered_converter(
-    XGBRegressor, 'XGBoostXGBRegressor',
-    calculate_linear_regressor_output_shapes, convert_xgboost)
-
-
-data = load_diabetes()
-x = data.data
-y = data.target
-X_train, X_test, y_train, _ = train_test_split(x, y, test_size=0.5)
-
-pipe = Pipeline([('scaler', StandardScaler()),
-                 ('xgb', XGBRegressor(n_estimators=3))])
-pipe.fit(X_train, y_train)
-
-print("predict", pipe.predict(X_test[:5]))
-
-
-

Out:

-
predict [142.53963   62.165913  77.709404  58.369633 108.54605 ]
-
-
-

ONNX

-
onx = to_onnx(pipe, X_train.astype(numpy.float32),
-              target_opset={'': 12, 'ai.onnx.ml': 2})
-
-sess = rt.InferenceSession(onx.SerializeToString())
-pred_onx = sess.run(None, {"X": X_test[:5].astype(numpy.float32)})
-print("predict", pred_onx[0].ravel())
-
-
-

Out:

-
predict [142.53963   62.165913  77.709404  58.369633 108.54605 ]
-
-
-

Some discrepencies may appear. In that case, -you should read Issues when switching to float.

-
-
-

Same with a Booster#

-

A booster cannot be inserted in a pipeline. It requires -a different conversion function because it does not -follow scikit-learn API.

-
x, y = make_classification(n_classes=2, n_features=5,
-                           n_samples=100,
-                           random_state=42, n_informative=3)
-X_train, X_test, y_train, _ = train_test_split(x, y, test_size=0.5,
-                                               random_state=42)
-
-dtrain = DMatrix(X_train, label=y_train)
-
-param = {'objective': 'multi:softmax', 'num_class': 3}
-bst = train_xgb(param, dtrain, 10)
-
-initial_type = [('float_input', FloatTensorType([None, X_train.shape[1]]))]
-
-try:
-    onx = convert_xgboost_booster(bst, "name", initial_types=initial_type)
-    cont = True
-except AssertionError as e:
-    print("XGBoost is too recent or onnxmltools too old.", e)
-    cont = False
-
-if cont:
-    sess = rt.InferenceSession(onx.SerializeToString())
-    input_name = sess.get_inputs()[0].name
-    label_name = sess.get_outputs()[0].name
-    pred_onx = sess.run(
-        [label_name], {input_name: X_test.astype(numpy.float32)})[0]
-    print(pred_onx)
-
-
-

Out:

-
[14:44:08] WARNING: C:/Users/Administrator/workspace/xgboost-win64_release_1.5.1/src/learner.cc:1115: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'multi:softmax' was changed from 'merror' to 'mlogloss'. Explicitly set eval_metric if you'd like to restore the old behavior.
-[0 0 1 1 0 1 0 1 0 1 0 0 1 1 1 0 0 1 1 1 1 0 0 1 0 0 0 1 1 1 0 1 1 0 1 1 1
- 0 1 1 1 0 0 1 1 0 0 0 1 0]
-
-
-

Total running time of the script: ( 0 minutes 0.586 seconds)

- -

Gallery generated by Sphinx-Gallery

-
-
- - -
- - - - - -
- -
-
- - - - - - -
-
+Convert a pipeline with a XGBoost model - sklearn-onnx 1.14.0 documentation

Convert a pipeline with a XGBoost model#

+

sklearn-onnx only converts scikit-learn models into ONNX, but many libraries implement the scikit-learn API so that their models can be included in a scikit-learn pipeline. This example considers a pipeline including an XGBoost model. sklearn-onnx can convert the whole pipeline as long as it knows the converter associated with an XGBClassifier. Let’s see how to do it.

+
+

Train a XGBoost classifier#

+
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+from mlprodict.onnxrt import OnnxInference
+import numpy
+import onnxruntime as rt
+from sklearn.datasets import load_iris, load_diabetes, make_classification
+from sklearn.model_selection import train_test_split
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import StandardScaler
+from xgboost import XGBClassifier, XGBRegressor, DMatrix, train as train_xgb
+from skl2onnx.common.data_types import FloatTensorType
+from skl2onnx import convert_sklearn, to_onnx, update_registered_converter
+from skl2onnx.common.shape_calculator import (
+    calculate_linear_classifier_output_shapes,
+    calculate_linear_regressor_output_shapes)
+from onnxmltools.convert.xgboost.operator_converters.XGBoost import (
+    convert_xgboost)
+from onnxmltools.convert import convert_xgboost as convert_xgboost_booster
+
+
+data = load_iris()
+X = data.data[:, :2]
+y = data.target
+
+ind = numpy.arange(X.shape[0])
+numpy.random.shuffle(ind)
+X = X[ind, :].copy()
+y = y[ind].copy()
+
+pipe = Pipeline([('scaler', StandardScaler()),
+                 ('xgb', XGBClassifier(n_estimators=3))])
+pipe.fit(X, y)
+
+# The conversion fails but it is expected.
+
+try:
+    convert_sklearn(pipe, 'pipeline_xgboost',
+                    [('input', FloatTensorType([None, 2]))],
+                    target_opset={'': 12, 'ai.onnx.ml': 2})
+except Exception as e:
+    print(e)
+
+# The error message tells no converter was found
+# for :epkg:`XGBoost` models. By default, :epkg:`sklearn-onnx`
+# only handles models from :epkg:`scikit-learn` but it can
+# be extended to every model following :epkg:`scikit-learn`
+# API as long as the module knows there exists a converter
+# for every model used in a pipeline. That's why
+# we need to register a converter.
+
+
+
+
+

Register the converter for XGBClassifier#

+

The converter is implemented in onnxmltools: onnxmltools…XGBoost.py, and the shape calculator in onnxmltools…Classifier.py.

+
update_registered_converter(
+    XGBClassifier, 'XGBoostXGBClassifier',
+    calculate_linear_classifier_output_shapes, convert_xgboost,
+    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})
+
+
+
+
+

Convert again#

+
model_onnx = convert_sklearn(
+    pipe, 'pipeline_xgboost',
+    [('input', FloatTensorType([None, 2]))],
+    target_opset={'': 12, 'ai.onnx.ml': 2})
+
+# And save.
+with open("pipeline_xgboost.onnx", "wb") as f:
+    f.write(model_onnx.SerializeToString())
+
+
+
+
+

Compare the predictions#

+

Predictions with XGBoost.

+
print("predict", pipe.predict(X[:5]))
+print("predict_proba", pipe.predict_proba(X[:1]))
+
+
+
predict [1 1 1 2 1]
+predict_proba [[0.1758379  0.43438542 0.3897767 ]]
+
+
+

Predictions with onnxruntime.

+
sess = rt.InferenceSession("pipeline_xgboost.onnx")
+pred_onx = sess.run(None, {"input": X[:5].astype(numpy.float32)})
+print("predict", pred_onx[0])
+print("predict_proba", pred_onx[1][:1])
+
+
+
predict [1 1 1 2 1]
+predict_proba [{0: 0.175837904214859, 1: 0.43438541889190674, 2: 0.38977670669555664}]
+
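The probabilities are returned as a list of dictionaries because the converter appends a ZipMap operator by default. If a plain array is preferred, the zipmap option declared above when the converter was registered can be turned off. The following sketch reuses the objects defined earlier in this example; the variable names are illustrative.

# Sketch: convert again with ZipMap disabled so that the second output
# becomes a plain float array instead of a list of dictionaries.
model_onnx_nozipmap = convert_sklearn(
    pipe, 'pipeline_xgboost',
    [('input', FloatTensorType([None, 2]))],
    target_opset={'': 12, 'ai.onnx.ml': 2},
    options={'zipmap': False})
sess_nozipmap = rt.InferenceSession(model_onnx_nozipmap.SerializeToString())
print(sess_nozipmap.run(None, {"input": X[:1].astype(numpy.float32)})[1])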
+
+
+
+

Final graph#

+
oinf = OnnxInference(model_onnx)
+ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot gexternal xgboost
+
+

Same example with XGBRegressor#

+
update_registered_converter(
+    XGBRegressor, 'XGBoostXGBRegressor',
+    calculate_linear_regressor_output_shapes, convert_xgboost)
+
+
+data = load_diabetes()
+x = data.data
+y = data.target
+X_train, X_test, y_train, _ = train_test_split(x, y, test_size=0.5)
+
+pipe = Pipeline([('scaler', StandardScaler()),
+                 ('xgb', XGBRegressor(n_estimators=3))])
+pipe.fit(X_train, y_train)
+
+print("predict", pipe.predict(X_test[:5]))
+
+
+
predict [73.99487 44.04601 77.94355 75.82603 44.04601]
+
+
+

ONNX

+
onx = to_onnx(pipe, X_train.astype(numpy.float32),
+              target_opset={'': 12, 'ai.onnx.ml': 2})
+
+sess = rt.InferenceSession(onx.SerializeToString())
+pred_onx = sess.run(None, {"X": X_test[:5].astype(numpy.float32)})
+print("predict", pred_onx[0].ravel())
+
+
+
predict [73.99487 44.04601 77.94355 75.82603 44.04601]
+
+
+

Some discrepancies may appear. In that case, +you should read Issues when switching to float.
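One quick way to quantify the discrepancy is to compare both outputs directly. This is a minimal sketch reusing the pipe, sess and X_test objects created above:

# Sketch: measure the gap between scikit-learn and onnxruntime predictions.
skl_pred = pipe.predict(X_test[:5])
onx_pred = sess.run(None, {"X": X_test[:5].astype(numpy.float32)})[0].ravel()
print("max absolute difference:", numpy.abs(skl_pred - onx_pred).max())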

+
+
+

Same with a Booster#

+

A Booster cannot be inserted in a pipeline. It requires +a different conversion function because it does not +follow the scikit-learn API.

+
x, y = make_classification(n_classes=2, n_features=5,
+                           n_samples=100,
+                           random_state=42, n_informative=3)
+X_train, X_test, y_train, _ = train_test_split(x, y, test_size=0.5,
+                                               random_state=42)
+
+dtrain = DMatrix(X_train, label=y_train)
+
+param = {'objective': 'multi:softmax', 'num_class': 3}
+bst = train_xgb(param, dtrain, 10)
+
+initial_type = [('float_input', FloatTensorType([None, X_train.shape[1]]))]
+
+try:
+    onx = convert_xgboost_booster(bst, "name", initial_types=initial_type)
+    cont = True
+except AssertionError as e:
+    print("XGBoost is too recent or onnxmltools too old.", e)
+    cont = False
+
+if cont:
+    sess = rt.InferenceSession(onx.SerializeToString())
+    input_name = sess.get_inputs()[0].name
+    label_name = sess.get_outputs()[0].name
+    pred_onx = sess.run(
+        [label_name], {input_name: X_test.astype(numpy.float32)})[0]
+    print(pred_onx)
+
+
+
[0 0 1 1 0 1 0 1 0 1 0 0 1 1 1 0 0 1 1 1 1 0 0 1 0 0 0 1 1 1 0 1 1 0 1 1 1
+ 0 1 1 1 0 0 1 1 0 0 0 1 0]
+
+
+

Total running time of the script: ( 0 minutes 0.570 seconds)

+ +

Gallery generated by Sphinx-Gallery

+ \ No newline at end of file
diff --git a/auto_tutorial/plot_icustom_converter.html b/auto_tutorial/plot_icustom_converter.html index 48a4e0d86..03af53a32 100644 --- a/auto_tutorial/plot_icustom_converter.html +++ b/auto_tutorial/plot_icustom_converter.html @@ -1,764 +1,628 @@
- Implement a new converter — sklearn-onnx 1.11.2 documentation

Implement a new converter#

-

By default, sklearn-onnx assumes that a classifier -has two outputs (label and probabilities), a regressor -has one output (prediction), a transform has one output -(the transformed data). This example assumes the model to -convert is one of them. In that case, a new converter requires -in fact two functions:

-
    -
  • a shape calculator: it defines the output shape and type -based on the model and input type,

  • -
  • a converter: it actually builds an ONNX graph equivalent -to the prediction function to be converted.

  • -
-

This example implements both components for a new model.

- -
-

Custom model#

-

Let’s implement a simple custom model using -scikit-learn API. The model is preprocessing -which decorrelates correlated random variables. -If X is a matrix of features, V=\frac{1}{n}X'X -is the covariance matrix. We compute X V^{1/2}.

-
from mlprodict.onnxrt import OnnxInference
-from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
-import pickle
-from io import BytesIO
-import numpy
-from numpy.testing import assert_almost_equal
-from onnxruntime import InferenceSession
-from sklearn.base import TransformerMixin, BaseEstimator
-from sklearn.datasets import load_iris
-from skl2onnx.common.data_types import guess_numpy_type
-from skl2onnx import to_onnx
-from skl2onnx import update_registered_converter
-from skl2onnx.algebra.onnx_ops import OnnxMatMul, OnnxSub
-
-
-class DecorrelateTransformer(TransformerMixin, BaseEstimator):
-    """
-    Decorrelates correlated gaussian features.
-
-    :param alpha: avoids non inversible matrices
-        by adding *alpha* identity matrix
-
-    *Attributes*
-
-    * `self.mean_`: average
-    * `self.coef_`: square root of the coveriance matrix
-    """
-
-    def __init__(self, alpha=0.):
-        BaseEstimator.__init__(self)
-        TransformerMixin.__init__(self)
-        self.alpha = alpha
-
-    def fit(self, X, y=None, sample_weights=None):
-        if sample_weights is not None:
-            raise NotImplementedError(
-                "sample_weights != None is not implemented.")
-        self.mean_ = numpy.mean(X, axis=0, keepdims=True)
-        X = X - self.mean_
-        V = X.T @ X / X.shape[0]
-        if self.alpha != 0:
-            V += numpy.identity(V.shape[0]) * self.alpha
-        L, P = numpy.linalg.eig(V)
-        Linv = L ** (-0.5)
-        diag = numpy.diag(Linv)
-        root = P @ diag @ P.transpose()
-        self.coef_ = root
-        return self
-
-    def transform(self, X):
-        return (X - self.mean_) @ self.coef_
-
-
-def test_decorrelate_transformer():
-    data = load_iris()
-    X = data.data
-
-    dec = DecorrelateTransformer()
-    dec.fit(X)
-    pred = dec.transform(X)
-    cov = pred.T @ pred
-    cov /= cov[0, 0]
-    assert_almost_equal(numpy.identity(4), cov)
-
-    dec = DecorrelateTransformer(alpha=1e-10)
-    dec.fit(X)
-    pred = dec.transform(X)
-    cov = pred.T @ pred
-    cov /= cov[0, 0]
-    assert_almost_equal(numpy.identity(4), cov)
-
-    st = BytesIO()
-    pickle.dump(dec, st)
-    dec2 = pickle.load(BytesIO(st.getvalue()))
-    assert_almost_equal(dec.mean_, dec2.mean_)
-    assert_almost_equal(dec.coef_, dec2.coef_)
-    assert id(dec.mean_) != id(dec2.mean_)
-    assert id(dec.coef_) != id(dec2.coef_)
-
-
-test_decorrelate_transformer()
-
-data = load_iris()
-X = data.data
-
-dec = DecorrelateTransformer()
-dec.fit(X)
-pred = dec.transform(X[:5])
-print(pred)
-
-
-

Out:

-
[[ 0.0167562   0.52111756 -1.24946737 -0.56194325]
- [-0.0727878  -0.80853732 -1.43841018 -0.37441392]
- [-0.69971891 -0.09950908 -1.2138161  -0.3499275 ]
- [-1.13063404 -0.13540568 -0.79087008 -0.73938966]
- [-0.35790036  0.91900236 -1.04034399 -0.6509266 ]]
-
-
-

Trained coefficients.

-
print(dec.mean_)
-print(dec.coef_)
-
-
-

Out:

-
[[5.84333333 3.05733333 3.758      1.19933333]]
-[[ 2.8040383  -0.94252732 -1.22382017  0.36769632]
- [-0.94252732  3.03632069  0.86741369 -0.52213719]
- [-1.22382017  0.86741369  1.93652687 -2.02453122]
- [ 0.36769632 -0.52213719 -2.02453122  4.83455725]]
-
-
-
-
-

Conversion into ONNX#

-

Let’s try to convert it and see what happens.

-
try:
-    to_onnx(dec, X.astype(numpy.float32))
-except Exception as e:
-    print(e)
-
-
-

Out:

-
Unable to find a shape calculator for type '<class '__main__.DecorrelateTransformer'>'.
-It usually means the pipeline being converted contains a
-transformer or a predictor with no corresponding converter
-implemented in sklearn-onnx. If the converted is implemented
-in another library, you need to register
-the converted so that it can be used by sklearn-onnx (function
-update_registered_converter). If the model is not yet covered
-by sklearn-onnx, you may raise an issue to
-https://github.com/onnx/sklearn-onnx/issues
-to get the converter implemented or even contribute to the
-project. If the model is a custom model, a new converter must
-be implemented. Examples can be found in the gallery.
-
-
-

This error means there is no converter associated -to DecorrelateTransformer. Let’s implement it. -It requires the two following -functions, a shape calculator and a converter -with the same signature as below. -First the shape calculator. We retrieve the input type -add tells the output type has the same type, -the same number of rows and a specific number of columns.

-
def decorrelate_transformer_shape_calculator(operator):
-    op = operator.raw_operator
-    input_type = operator.inputs[0].type.__class__
-    # The shape may be unknown. *get_first_dimension*
-    # returns the appropriate value, None in most cases
-    # meaning the transformer can process any batch of observations.
-    input_dim = operator.inputs[0].get_first_dimension()
-    output_type = input_type([input_dim, op.coef_.shape[1]])
-    operator.outputs[0].type = output_type
-
-
-

The converter. One thing we need to pay attention to -is the target opset. This information is important -to make sure that every node is defined following the -specifications of that opset.

-
def decorrelate_transformer_converter(scope, operator, container):
-    op = operator.raw_operator
-    opv = container.target_opset
-    out = operator.outputs
-
-    # We retrieve the unique input.
-    X = operator.inputs[0]
-
-    # In most case, computation happen in floats.
-    # But it might be with double. ONNX is very strict
-    # about types, every constant should have the same
-    # type as the input.
-    dtype = guess_numpy_type(X.type)
-
-    # We tell in ONNX language how to compute the unique output.
-    # op_version=opv tells which opset is requested
-    Y = OnnxMatMul(
-        OnnxSub(X, op.mean_.astype(dtype), op_version=opv),
-        op.coef_.astype(dtype),
-        op_version=opv, output_names=out[:1])
-    Y.add_to(scope, container)
-
-
-

We need to let skl2onnx know about the new converter.

-
update_registered_converter(
-    DecorrelateTransformer, "SklearnDecorrelateTransformer",
-    decorrelate_transformer_shape_calculator,
-    decorrelate_transformer_converter)
-
-
-onx = to_onnx(dec, X.astype(numpy.float32))
-
-sess = InferenceSession(onx.SerializeToString())
-
-exp = dec.transform(X.astype(numpy.float32))
-got = sess.run(None, {'X': X.astype(numpy.float32)})[0]
-
-
-def diff(p1, p2):
-    p1 = p1.ravel()
-    p2 = p2.ravel()
-    d = numpy.abs(p2 - p1)
-    return d.max(), (d / numpy.abs(p1)).max()
-
-
-print(diff(exp, got))
-
-
-

Out:

-
(6.046576181972796e-07, 0.0002951417065241126)
-
-
-

Let’s check it works as well with double.

-
onx = to_onnx(dec, X.astype(numpy.float64))
-
-sess = InferenceSession(onx.SerializeToString())
-
-exp = dec.transform(X.astype(numpy.float64))
-got = sess.run(None, {'X': X.astype(numpy.float64)})[0]
-print(diff(exp, got))
-
-
-

Out:

-
(0.0, 0.0)
-
-
-

The differences are smaller with double as expected.

-
-
-

Final graph#

-
oinf = OnnxInference(onx)
-ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot icustom converter

Total running time of the script: ( 0 minutes 0.406 seconds)

- -

Gallery generated by Sphinx-Gallery

+ Implement a new converter - sklearn-onnx 1.14.0 documentation

Implement a new converter#

+

By default, sklearn-onnx assumes that a classifier +has two outputs (label and probabilities), a regressor +has one output (prediction), and a transformer has one output +(the transformed data). This example assumes the model to +convert is one of them. In that case, a new converter in fact requires +two functions:

+
    +
  • a shape calculator: it defines the output shape and type +based on the model and input type,

  • +
  • a converter: it actually builds an ONNX graph equivalent +to the prediction function to be converted.

  • +
+

This example implements both components for a new model.
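To fix the expected signatures in mind, here is a minimal skeleton of the two callables. Names and bodies are placeholders; the actual implementations are written later in this example.

# Skeleton only: the real implementations appear below.
def my_shape_calculator(operator):
    # Derive operator.outputs[0].type from the input type and from the
    # fitted model available as operator.raw_operator.
    ...


def my_converter(scope, operator, container):
    # Add ONNX nodes to `container` which reproduce the prediction
    # function of operator.raw_operator.
    ...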

+
+

Custom model#

+

Let’s implement a simple custom model using the +scikit-learn API. The model is a preprocessing step +which decorrelates correlated random variables. +If X is a matrix of features, V=\frac{1}{n}X'X +is the covariance matrix. We compute X V^{-1/2}.
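Spelled out, this restates what the fit method below computes through an eigendecomposition of V:

V = P \operatorname{diag}(L) P'
coef\_ = P \operatorname{diag}(L^{-1/2}) P'
transform(X) = (X - mean\_) \, coef\_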

+
from mlprodict.onnxrt import OnnxInference
+from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+import pickle
+from io import BytesIO
+import numpy
+from numpy.testing import assert_almost_equal
+from onnxruntime import InferenceSession
+from sklearn.base import TransformerMixin, BaseEstimator
+from sklearn.datasets import load_iris
+from skl2onnx.common.data_types import guess_numpy_type
+from skl2onnx import to_onnx
+from skl2onnx import update_registered_converter
+from skl2onnx.algebra.onnx_ops import OnnxMatMul, OnnxSub
+
+
+class DecorrelateTransformer(TransformerMixin, BaseEstimator):
+    """
+    Decorrelates correlated Gaussian features.
+
+    :param alpha: avoids non-invertible matrices
+        by adding *alpha* times the identity matrix
+
+    *Attributes*
+
+    * `self.mean_`: average
+    * `self.coef_`: inverse square root of the covariance matrix
+    """
+
+    def __init__(self, alpha=0.):
+        BaseEstimator.__init__(self)
+        TransformerMixin.__init__(self)
+        self.alpha = alpha
+
+    def fit(self, X, y=None, sample_weights=None):
+        if sample_weights is not None:
+            raise NotImplementedError(
+                "sample_weights != None is not implemented.")
+        self.mean_ = numpy.mean(X, axis=0, keepdims=True)
+        X = X - self.mean_
+        V = X.T @ X / X.shape[0]
+        if self.alpha != 0:
+            V += numpy.identity(V.shape[0]) * self.alpha
+        L, P = numpy.linalg.eig(V)
+        Linv = L ** (-0.5)
+        diag = numpy.diag(Linv)
+        root = P @ diag @ P.transpose()
+        self.coef_ = root
+        return self
+
+    def transform(self, X):
+        return (X - self.mean_) @ self.coef_
+
+
+def test_decorrelate_transformer():
+    data = load_iris()
+    X = data.data
+
+    dec = DecorrelateTransformer()
+    dec.fit(X)
+    pred = dec.transform(X)
+    cov = pred.T @ pred
+    cov /= cov[0, 0]
+    assert_almost_equal(numpy.identity(4), cov)
+
+    dec = DecorrelateTransformer(alpha=1e-10)
+    dec.fit(X)
+    pred = dec.transform(X)
+    cov = pred.T @ pred
+    cov /= cov[0, 0]
+    assert_almost_equal(numpy.identity(4), cov)
+
+    st = BytesIO()
+    pickle.dump(dec, st)
+    dec2 = pickle.load(BytesIO(st.getvalue()))
+    assert_almost_equal(dec.mean_, dec2.mean_)
+    assert_almost_equal(dec.coef_, dec2.coef_)
+    assert id(dec.mean_) != id(dec2.mean_)
+    assert id(dec.coef_) != id(dec2.coef_)
+
+
+test_decorrelate_transformer()
+
+data = load_iris()
+X = data.data
+
+dec = DecorrelateTransformer()
+dec.fit(X)
+pred = dec.transform(X[:5])
+print(pred)
+
+
+
[[ 0.0167562   0.52111756 -1.24946737 -0.56194325]
+ [-0.0727878  -0.80853732 -1.43841018 -0.37441392]
+ [-0.69971891 -0.09950908 -1.2138161  -0.3499275 ]
+ [-1.13063404 -0.13540568 -0.79087008 -0.73938966]
+ [-0.35790036  0.91900236 -1.04034399 -0.6509266 ]]
+
+
+

Trained coefficients.

+
print(dec.mean_)
+print(dec.coef_)
+
+
+
[[5.84333333 3.05733333 3.758      1.19933333]]
+[[ 2.8040383  -0.94252732 -1.22382017  0.36769632]
+ [-0.94252732  3.03632069  0.86741369 -0.52213719]
+ [-1.22382017  0.86741369  1.93652687 -2.02453122]
+ [ 0.36769632 -0.52213719 -2.02453122  4.83455725]]
+
+
+
+
+

Conversion into ONNX#

+

Let’s try to convert it and see what happens.

+
try:
+    to_onnx(dec, X.astype(numpy.float32))
+except Exception as e:
+    print(e)
+
+
+
Unable to find a shape calculator for type '<class '__main__.DecorrelateTransformer'>'.
+It usually means the pipeline being converted contains a
+transformer or a predictor with no corresponding converter
+implemented in sklearn-onnx. If the converted is implemented
+in another library, you need to register
+the converted so that it can be used by sklearn-onnx (function
+update_registered_converter). If the model is not yet covered
+by sklearn-onnx, you may raise an issue to
+https://github.com/onnx/sklearn-onnx/issues
+to get the converter implemented or even contribute to the
+project. If the model is a custom model, a new converter must
+be implemented. Examples can be found in the gallery.
+
+
+

This error means there is no converter associated +with DecorrelateTransformer. Let’s implement it. +It requires the two following +functions, a shape calculator and a converter, +with the same signatures as below. +First the shape calculator: we retrieve the input type +and tell that the output type has the same type, +the same number of rows and a specific number of columns.

+
def decorrelate_transformer_shape_calculator(operator):
+    op = operator.raw_operator
+    input_type = operator.inputs[0].type.__class__
+    # The shape may be unknown. *get_first_dimension*
+    # returns the appropriate value, None in most cases
+    # meaning the transformer can process any batch of observations.
+    input_dim = operator.inputs[0].get_first_dimension()
+    output_type = input_type([input_dim, op.coef_.shape[1]])
+    operator.outputs[0].type = output_type
+
+
+

The converter. One thing we need to pay attention to +is the target opset. This information is important +to make sure that every node is defined following the +specifications of that opset.

+
def decorrelate_transformer_converter(scope, operator, container):
+    op = operator.raw_operator
+    opv = container.target_opset
+    out = operator.outputs
+
+    # We retrieve the unique input.
+    X = operator.inputs[0]
+
+    # In most cases, computation happens in floats,
+    # but it might be in doubles. ONNX is very strict
+    # about types: every constant should have the same
+    # type as the input.
+    dtype = guess_numpy_type(X.type)
+
+    # We tell in ONNX language how to compute the unique output.
+    # op_version=opv tells which opset is requested
+    Y = OnnxMatMul(
+        OnnxSub(X, op.mean_.astype(dtype), op_version=opv),
+        op.coef_.astype(dtype),
+        op_version=opv, output_names=out[:1])
+    Y.add_to(scope, container)
+
+
+

We need to let skl2onnx know about the new converter.

+
update_registered_converter(
+    DecorrelateTransformer, "SklearnDecorrelateTransformer",
+    decorrelate_transformer_shape_calculator,
+    decorrelate_transformer_converter)
+
+
+onx = to_onnx(dec, X.astype(numpy.float32))
+
+sess = InferenceSession(onx.SerializeToString())
+
+exp = dec.transform(X.astype(numpy.float32))
+got = sess.run(None, {'X': X.astype(numpy.float32)})[0]
+
+
+def diff(p1, p2):
+    p1 = p1.ravel()
+    p2 = p2.ravel()
+    d = numpy.abs(p2 - p1)
+    return d.max(), (d / numpy.abs(p1)).max()
+
+
+print(diff(exp, got))
+
+
+
(6.04657619085458e-07, 0.0002951417065406967)
+
+
+

Let’s check it works as well with double.

+
onx = to_onnx(dec, X.astype(numpy.float64))
+
+sess = InferenceSession(onx.SerializeToString())
+
+exp = dec.transform(X.astype(numpy.float64))
+got = sess.run(None, {'X': X.astype(numpy.float64)})[0]
+print(diff(exp, got))
+
+
+
(0.0, 0.0)
+
+
+

The differences are smaller with double as expected.

+
+
+

Final graph#

+
oinf = OnnxInference(onx)
+ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot icustom converter

Total running time of the script: ( 0 minutes 0.321 seconds)

+ +

Gallery generated by Sphinx-Gallery

+ \ No newline at end of file
diff --git a/auto_tutorial/plot_jcustom_syntax.html b/auto_tutorial/plot_jcustom_syntax.html index 996d3c7d1..332dd3e15 100644 --- a/auto_tutorial/plot_jcustom_syntax.html +++ b/auto_tutorial/plot_jcustom_syntax.html @@ -1,676 +1,547 @@
- Two ways to implement a converter — sklearn-onnx 1.11.2 documentation

Two ways to implement a converter#

-

There are two ways to write a converter. The first one -is less verbose and easier to understand -(see k_means.py). -The other is very verbose (see ada_boost.py -for an example).

-

The first way is used in Implement a new converter. -This one demonstrates the second way which is usually the one -used in other converter library. It is more verbose.

- -
-

Custom model#

-

It basically copies what is in example -:ref:`l-plot-custom-converter.

-
from skl2onnx.common.data_types import guess_proto_type
-from onnxconverter_common.onnx_ops import apply_sub
-from onnxruntime import InferenceSession
-from skl2onnx import update_registered_converter
-from skl2onnx import to_onnx
-import numpy
-from sklearn.base import TransformerMixin, BaseEstimator
-from sklearn.datasets import load_iris
-
-
-class DecorrelateTransformer(TransformerMixin, BaseEstimator):
-    """
-    Decorrelates correlated gaussian features.
-
-    :param alpha: avoids non inversible matrices
-        by adding *alpha* identity matrix
-
-    *Attributes*
-
-    * `self.mean_`: average
-    * `self.coef_`: square root of the coveriance matrix
-    """
-
-    def __init__(self, alpha=0.):
-        BaseEstimator.__init__(self)
-        TransformerMixin.__init__(self)
-        self.alpha = alpha
-
-    def fit(self, X, y=None, sample_weights=None):
-        if sample_weights is not None:
-            raise NotImplementedError(
-                "sample_weights != None is not implemented.")
-        self.mean_ = numpy.mean(X, axis=0, keepdims=True)
-        X = X - self.mean_
-        V = X.T @ X / X.shape[0]
-        if self.alpha != 0:
-            V += numpy.identity(V.shape[0]) * self.alpha
-        L, P = numpy.linalg.eig(V)
-        Linv = L ** (-0.5)
-        diag = numpy.diag(Linv)
-        root = P @ diag @ P.transpose()
-        self.coef_ = root
-        return self
-
-    def transform(self, X):
-        return (X - self.mean_) @ self.coef_
-
-
-data = load_iris()
-X = data.data
-
-dec = DecorrelateTransformer()
-dec.fit(X)
-pred = dec.transform(X[:5])
-print(pred)
-
-
-

Out:

-
[[ 0.0167562   0.52111756 -1.24946737 -0.56194325]
- [-0.0727878  -0.80853732 -1.43841018 -0.37441392]
- [-0.69971891 -0.09950908 -1.2138161  -0.3499275 ]
- [-1.13063404 -0.13540568 -0.79087008 -0.73938966]
- [-0.35790036  0.91900236 -1.04034399 -0.6509266 ]]
-
-
-
-
-

Conversion into ONNX#

-

The shape calculator does not change.

-
def decorrelate_transformer_shape_calculator(operator):
-    op = operator.raw_operator
-    input_type = operator.inputs[0].type.__class__
-    # The shape may be unknown. *get_first_dimension*
-    # returns the appropriate value, None in most cases
-    # meaning the transformer can process any batch of observations.
-    input_dim = operator.inputs[0].get_first_dimension()
-    output_type = input_type([input_dim, op.coef_.shape[1]])
-    operator.outputs[0].type = output_type
-
-
-

The converter is different.

-
def decorrelate_transformer_converter(scope, operator, container):
-    op = operator.raw_operator
-    out = operator.outputs
-
-    # We retrieve the unique input.
-    X = operator.inputs[0]
-
-    # In most case, computation happen in floats.
-    # But it might be with double. ONNX is very strict
-    # about types, every constant should have the same
-    # type as the input.
-    proto_dtype = guess_proto_type(X.type)
-
-    mean_name = scope.get_unique_variable_name('mean')
-    container.add_initializer(mean_name, proto_dtype,
-                              op.mean_.shape, list(op.mean_.ravel()))
-
-    coef_name = scope.get_unique_variable_name('coef')
-    container.add_initializer(coef_name, proto_dtype,
-                              op.coef_.shape, list(op.coef_.ravel()))
-
-    op_name = scope.get_unique_operator_name('sub')
-    sub_name = scope.get_unique_variable_name('sub')
-    # This function is defined in package onnxconverter_common.
-    # Most common operators can be added to the graph with
-    # these functions. It handles the case when specifications
-    # changed accross opsets (a parameter becomes an input
-    # for example).
-    apply_sub(scope, [X.full_name, mean_name], sub_name, container,
-              operator_name=op_name)
-
-    op_name = scope.get_unique_operator_name('matmul')
-    container.add_node(
-        'MatMul', [sub_name, coef_name],
-        out[0].full_name, name=op_name)
-
-
-

We need to let skl2onnx know about the new converter.

-
update_registered_converter(
-    DecorrelateTransformer, "SklearnDecorrelateTransformer",
-    decorrelate_transformer_shape_calculator,
-    decorrelate_transformer_converter)
-
-
-onx = to_onnx(dec, X.astype(numpy.float32))
-
-sess = InferenceSession(onx.SerializeToString())
-
-exp = dec.transform(X.astype(numpy.float32))
-got = sess.run(None, {'X': X.astype(numpy.float32)})[0]
-
-
-def diff(p1, p2):
-    p1 = p1.ravel()
-    p2 = p2.ravel()
-    d = numpy.abs(p2 - p1)
-    return d.max(), (d / numpy.abs(p1)).max()
-
-
-print(diff(exp, got))
-
-
-

Out:

-
(6.046576181972796e-07, 0.0002951417065241126)
-
-
-

Let’s check it works as well with double.

-
onx = to_onnx(dec, X.astype(numpy.float64))
-
-sess = InferenceSession(onx.SerializeToString())
-
-exp = dec.transform(X.astype(numpy.float64))
-got = sess.run(None, {'X': X.astype(numpy.float64)})[0]
-print(diff(exp, got))
-
-
-

Out:

-
(0.0, 0.0)
-
-
-

The differences are smaller with double as expected.

-

Total running time of the script: ( 0 minutes 0.078 seconds)

- -

Gallery generated by Sphinx-Gallery

+ Two ways to implement a converter - sklearn-onnx 1.14.0 documentation

Two ways to implement a converter#

+

There are two ways to write a converter. The first one +is less verbose and easier to understand +(see k_means.py). +The other is very verbose (see ada_boost.py +for an example).

+

The first way is used in Implement a new converter. +This one demonstrates the second way, which is usually the one +used in other converter libraries. It is more verbose.
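For orientation, here is a sketch of the first way as used in Implement a new converter; the rest of this page builds the same graph with the second, container-based way. The function name is illustrative.

# Sketch of the first (algebra) way, for comparison with the
# container-based converter written below.
from skl2onnx.common.data_types import guess_numpy_type
from skl2onnx.algebra.onnx_ops import OnnxMatMul, OnnxSub


def converter_algebra_style(scope, operator, container):
    op = operator.raw_operator
    opv = container.target_opset
    dtype = guess_numpy_type(operator.inputs[0].type)
    # Compose operator classes; they build the ONNX nodes for us.
    Y = OnnxMatMul(
        OnnxSub(operator.inputs[0], op.mean_.astype(dtype), op_version=opv),
        op.coef_.astype(dtype),
        op_version=opv, output_names=operator.outputs[:1])
    Y.add_to(scope, container)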

+
+

Custom model#

+

It basically copies what is in the example +Implement a new converter.

+
from skl2onnx.common.data_types import guess_proto_type
+from onnxconverter_common.onnx_ops import apply_sub
+from onnxruntime import InferenceSession
+from skl2onnx import update_registered_converter
+from skl2onnx import to_onnx
+import numpy
+from sklearn.base import TransformerMixin, BaseEstimator
+from sklearn.datasets import load_iris
+
+
+class DecorrelateTransformer(TransformerMixin, BaseEstimator):
+    """
+    Decorrelates correlated Gaussian features.
+
+    :param alpha: avoids non-invertible matrices
+        by adding *alpha* times the identity matrix
+
+    *Attributes*
+
+    * `self.mean_`: average
+    * `self.coef_`: inverse square root of the covariance matrix
+    """
+
+    def __init__(self, alpha=0.):
+        BaseEstimator.__init__(self)
+        TransformerMixin.__init__(self)
+        self.alpha = alpha
+
+    def fit(self, X, y=None, sample_weights=None):
+        if sample_weights is not None:
+            raise NotImplementedError(
+                "sample_weights != None is not implemented.")
+        self.mean_ = numpy.mean(X, axis=0, keepdims=True)
+        X = X - self.mean_
+        V = X.T @ X / X.shape[0]
+        if self.alpha != 0:
+            V += numpy.identity(V.shape[0]) * self.alpha
+        L, P = numpy.linalg.eig(V)
+        Linv = L ** (-0.5)
+        diag = numpy.diag(Linv)
+        root = P @ diag @ P.transpose()
+        self.coef_ = root
+        return self
+
+    def transform(self, X):
+        return (X - self.mean_) @ self.coef_
+
+
+data = load_iris()
+X = data.data
+
+dec = DecorrelateTransformer()
+dec.fit(X)
+pred = dec.transform(X[:5])
+print(pred)
+
+
+
[[ 0.0167562   0.52111756 -1.24946737 -0.56194325]
+ [-0.0727878  -0.80853732 -1.43841018 -0.37441392]
+ [-0.69971891 -0.09950908 -1.2138161  -0.3499275 ]
+ [-1.13063404 -0.13540568 -0.79087008 -0.73938966]
+ [-0.35790036  0.91900236 -1.04034399 -0.6509266 ]]
+
+
+
+
+

Conversion into ONNX#

+

The shape calculator does not change.

+
def decorrelate_transformer_shape_calculator(operator):
+    op = operator.raw_operator
+    input_type = operator.inputs[0].type.__class__
+    # The shape may be unknown. *get_first_dimension*
+    # returns the appropriate value, None in most cases
+    # meaning the transformer can process any batch of observations.
+    input_dim = operator.inputs[0].get_first_dimension()
+    output_type = input_type([input_dim, op.coef_.shape[1]])
+    operator.outputs[0].type = output_type
+
+
+

The converter is different.

+
def decorrelate_transformer_converter(scope, operator, container):
+    op = operator.raw_operator
+    out = operator.outputs
+
+    # We retrieve the unique input.
+    X = operator.inputs[0]
+
+    # In most cases, computation happens in floats,
+    # but it might be in doubles. ONNX is very strict
+    # about types: every constant should have the same
+    # type as the input.
+    proto_dtype = guess_proto_type(X.type)
+
+    mean_name = scope.get_unique_variable_name('mean')
+    container.add_initializer(mean_name, proto_dtype,
+                              op.mean_.shape, list(op.mean_.ravel()))
+
+    coef_name = scope.get_unique_variable_name('coef')
+    container.add_initializer(coef_name, proto_dtype,
+                              op.coef_.shape, list(op.coef_.ravel()))
+
+    op_name = scope.get_unique_operator_name('sub')
+    sub_name = scope.get_unique_variable_name('sub')
+    # This function is defined in package onnxconverter_common.
+    # Most common operators can be added to the graph with
+    # these functions. It handles the case when specifications
+    # changed across opsets (a parameter becomes an input
+    # for example).
+    apply_sub(scope, [X.full_name, mean_name], sub_name, container,
+              operator_name=op_name)
+
+    op_name = scope.get_unique_operator_name('matmul')
+    container.add_node(
+        'MatMul', [sub_name, coef_name],
+        out[0].full_name, name=op_name)
+
+
+

We need to let skl2onnx know about the new converter.

+
update_registered_converter(
+    DecorrelateTransformer, "SklearnDecorrelateTransformer",
+    decorrelate_transformer_shape_calculator,
+    decorrelate_transformer_converter)
+
+
+onx = to_onnx(dec, X.astype(numpy.float32))
+
+sess = InferenceSession(onx.SerializeToString())
+
+exp = dec.transform(X.astype(numpy.float32))
+got = sess.run(None, {'X': X.astype(numpy.float32)})[0]
+
+
+def diff(p1, p2):
+    p1 = p1.ravel()
+    p2 = p2.ravel()
+    d = numpy.abs(p2 - p1)
+    return d.max(), (d / numpy.abs(p1)).max()
+
+
+print(diff(exp, got))
+
+
+
(6.04657619085458e-07, 0.0002951417065406967)
+
+
+

Let’s check it works as well with double.

+
onx = to_onnx(dec, X.astype(numpy.float64))
+
+sess = InferenceSession(onx.SerializeToString())
+
+exp = dec.transform(X.astype(numpy.float64))
+got = sess.run(None, {'X': X.astype(numpy.float64)})[0]
+print(diff(exp, got))
+
+
+
(0.0, 0.0)
+
+
+

The differences are smaller with double as expected.

+

Total running time of the script: ( 0 minutes 0.028 seconds)

+ +

Gallery generated by Sphinx-Gallery

+ \ No newline at end of file
diff --git a/auto_tutorial/plot_kcustom_converter_wrapper.html b/auto_tutorial/plot_kcustom_converter_wrapper.html index 977c4cc4a..0264b7e82 100644 --- a/auto_tutorial/plot_kcustom_converter_wrapper.html +++ b/auto_tutorial/plot_kcustom_converter_wrapper.html @@ -1,712 +1,577 @@
- Implement a new converter using other converters — sklearn-onnx 1.11.2 documentation

Implement a new converter using other converters#

-

In many cases, a custom models leverages existing models -which already have an associated converter. To convert this -patchwork, existing converters must be called. This example -shows how to do that. Example Implement a new converter -can be rewritten by using a PCA. -We could then reuse the converter associated to this model.

- -
-

Custom model#

-

Let’s implement a simple custom model using -scikit-learn API. The model is preprocessing -which decorrelates correlated random variables. -If X is a matrix of features, V=\frac{1}{n}X'X -is the covariance matrix. We compute X V^{1/2}.

-
from mlprodict.onnxrt import OnnxInference
-from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
-import pickle
-from io import BytesIO
-import numpy
-from numpy.testing import assert_almost_equal
-from onnxruntime import InferenceSession
-from sklearn.base import TransformerMixin, BaseEstimator
-from sklearn.datasets import load_iris
-from sklearn.decomposition import PCA
-from skl2onnx import update_registered_converter
-from skl2onnx.algebra.onnx_operator import OnnxSubEstimator
-from skl2onnx import to_onnx
-
-
-class DecorrelateTransformer(TransformerMixin, BaseEstimator):
-    """
-    Decorrelates correlated gaussian features.
-
-    :param alpha: avoids non inversible matrices
-        by adding *alpha* identity matrix
-
-    *Attributes*
-
-    * `self.mean_`: average
-    * `self.coef_`: square root of the coveriance matrix
-    """
-
-    def __init__(self, alpha=0.):
-        BaseEstimator.__init__(self)
-        TransformerMixin.__init__(self)
-        self.alpha = alpha
-
-    def fit(self, X, y=None, sample_weights=None):
-        self.pca_ = PCA(X.shape[1])
-        self.pca_.fit(X)
-        return self
-
-    def transform(self, X):
-        return self.pca_.transform(X)
-
-
-def test_decorrelate_transformer():
-    data = load_iris()
-    X = data.data
-
-    dec = DecorrelateTransformer()
-    dec.fit(X)
-    pred = dec.transform(X)
-    cov = pred.T @ pred
-    for i in range(cov.shape[0]):
-        cov[i, i] = 1.
-    assert_almost_equal(numpy.identity(4), cov)
-
-    st = BytesIO()
-    pickle.dump(dec, st)
-    dec2 = pickle.load(BytesIO(st.getvalue()))
-    assert_almost_equal(dec.transform(X), dec2.transform(X))
-
-
-test_decorrelate_transformer()
-
-data = load_iris()
-X = data.data
-
-dec = DecorrelateTransformer()
-dec.fit(X)
-pred = dec.transform(X[:5])
-print(pred)
-
-
-

Out:

-
[[-2.68412563e+00  3.19397247e-01 -2.79148276e-02 -2.26243707e-03]
- [-2.71414169e+00 -1.77001225e-01 -2.10464272e-01 -9.90265503e-02]
- [-2.88899057e+00 -1.44949426e-01  1.79002563e-02 -1.99683897e-02]
- [-2.74534286e+00 -3.18298979e-01  3.15593736e-02  7.55758166e-02]
- [-2.72871654e+00  3.26754513e-01  9.00792406e-02  6.12585926e-02]]
-
-
-
-
-

Conversion into ONNX#

-

Let’s try to convert it and see what happens.

-
try:
-    to_onnx(dec, X.astype(numpy.float32))
-except Exception as e:
-    print(e)
-
-
-

Out:

-
Unable to find a shape calculator for type '<class '__main__.DecorrelateTransformer'>'.
-It usually means the pipeline being converted contains a
-transformer or a predictor with no corresponding converter
-implemented in sklearn-onnx. If the converted is implemented
-in another library, you need to register
-the converted so that it can be used by sklearn-onnx (function
-update_registered_converter). If the model is not yet covered
-by sklearn-onnx, you may raise an issue to
-https://github.com/onnx/sklearn-onnx/issues
-to get the converter implemented or even contribute to the
-project. If the model is a custom model, a new converter must
-be implemented. Examples can be found in the gallery.
-
-
-

This error means there is no converter associated -to DecorrelateTransformer. Let’s do it. -It requires to implement the two following -functions, a shape calculator and a converter -with the same signature as below. -First the shape calculator. We retrieve the input type -add tells the output type has the same type, -the same number of rows and a specific number of columns.

-
def decorrelate_transformer_shape_calculator(operator):
-    op = operator.raw_operator
-    input_type = operator.inputs[0].type.__class__
-    input_dim = operator.inputs[0].type.shape[0]
-    output_type = input_type([input_dim, op.pca_.components_.shape[1]])
-    operator.outputs[0].type = output_type
-
-
-

The converter. One thing we need to pay attention to -is the target opset. This information is important -to make sure that every node is defined following the -specifications of that opset.

-
def decorrelate_transformer_converter(scope, operator, container):
-    op = operator.raw_operator
-    opv = container.target_opset
-    out = operator.outputs
-
-    # We retrieve the unique input.
-    X = operator.inputs[0]
-
-    # We tell in ONNX language how to compute the unique output.
-    # op_version=opv tells which opset is requested
-    Y = OnnxSubEstimator(op.pca_, X, op_version=opv, output_names=out[:1])
-    Y.add_to(scope, container)
-
-
-

We need to let skl2onnx know about the new converter.

-
update_registered_converter(
-    DecorrelateTransformer, "SklearnDecorrelateTransformer",
-    decorrelate_transformer_shape_calculator,
-    decorrelate_transformer_converter)
-
-
-onx = to_onnx(dec, X.astype(numpy.float32))
-
-sess = InferenceSession(onx.SerializeToString())
-
-exp = dec.transform(X.astype(numpy.float32))
-got = sess.run(None, {'X': X.astype(numpy.float32)})[0]
-
-
-def diff(p1, p2):
-    p1 = p1.ravel()
-    p2 = p2.ravel()
-    d = numpy.abs(p2 - p1)
-    return d.max(), (d / numpy.abs(p1)).max()
-
-
-print(diff(exp, got))
-
-
-

Out:

-
(3.56012595403854e-07, 0.0003158352661955726)
-
-
-

Let’s check it works as well with double.

-
onx = to_onnx(dec, X.astype(numpy.float64))
-
-sess = InferenceSession(onx.SerializeToString())
-
-exp = dec.transform(X.astype(numpy.float64))
-got = sess.run(None, {'X': X.astype(numpy.float64)})[0]
-print(diff(exp, got))
-
-
-

Out:

-
(0.0, 0.0)
-
-
-

The differences are smaller with double as expected.

-
-
-

Final graph#

-
oinf = OnnxInference(onx)
-ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot kcustom converter wrapper

Total running time of the script: ( 0 minutes 0.530 seconds)

- -

Gallery generated by Sphinx-Gallery

+ Implement a new converter using other converters - sklearn-onnx 1.14.0 documentation

Implement a new converter using other converters#

+

In many cases, a custom model leverages existing models +which already have an associated converter. To convert this +patchwork, existing converters must be called. This example +shows how to do that. Example Implement a new converter +can be rewritten by using a PCA. +We can then reuse the converter associated with this model.
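The key ingredient is OnnxSubEstimator, which inserts the graph produced by the converter of an already supported estimator. Here is a minimal sketch of the delegation pattern implemented later on this page; the function name is illustrative.

# Sketch of the delegation pattern (the full converter appears below).
from skl2onnx.algebra.onnx_operator import OnnxSubEstimator


def my_converter(scope, operator, container):
    op = operator.raw_operator          # the custom model
    X = operator.inputs[0]              # its unique input
    # Delegate to the converter of the fitted PCA stored in the model.
    Y = OnnxSubEstimator(op.pca_, X, op_version=container.target_opset,
                         output_names=operator.outputs[:1])
    Y.add_to(scope, container)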

+
+

Custom model#

+

Let’s implement a simple custom model using the +scikit-learn API. The model is a preprocessing step +which decorrelates correlated random variables. +If X is a matrix of features, V=\frac{1}{n}X'X +is the covariance matrix. We compute X V^{1/2}.

+
from mlprodict.onnxrt import OnnxInference
+from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+import pickle
+from io import BytesIO
+import numpy
+from numpy.testing import assert_almost_equal
+from onnxruntime import InferenceSession
+from sklearn.base import TransformerMixin, BaseEstimator
+from sklearn.datasets import load_iris
+from sklearn.decomposition import PCA
+from skl2onnx import update_registered_converter
+from skl2onnx.algebra.onnx_operator import OnnxSubEstimator
+from skl2onnx import to_onnx
+
+
+class DecorrelateTransformer(TransformerMixin, BaseEstimator):
+    """
+    Decorrelates correlated Gaussian features.
+
+    :param alpha: avoids non-invertible matrices
+        by adding *alpha* times the identity matrix
+
+    *Attributes*
+
+    * `self.pca_`: the PCA fitted on the training data
+    """
+
+    def __init__(self, alpha=0.):
+        BaseEstimator.__init__(self)
+        TransformerMixin.__init__(self)
+        self.alpha = alpha
+
+    def fit(self, X, y=None, sample_weights=None):
+        self.pca_ = PCA(X.shape[1])
+        self.pca_.fit(X)
+        return self
+
+    def transform(self, X):
+        return self.pca_.transform(X)
+
+
+def test_decorrelate_transformer():
+    data = load_iris()
+    X = data.data
+
+    dec = DecorrelateTransformer()
+    dec.fit(X)
+    pred = dec.transform(X)
+    cov = pred.T @ pred
+    for i in range(cov.shape[0]):
+        cov[i, i] = 1.
+    assert_almost_equal(numpy.identity(4), cov)
+
+    st = BytesIO()
+    pickle.dump(dec, st)
+    dec2 = pickle.load(BytesIO(st.getvalue()))
+    assert_almost_equal(dec.transform(X), dec2.transform(X))
+
+
+test_decorrelate_transformer()
+
+data = load_iris()
+X = data.data
+
+dec = DecorrelateTransformer()
+dec.fit(X)
+pred = dec.transform(X[:5])
+print(pred)
+
+
+
[[-2.68412563e+00  3.19397247e-01 -2.79148276e-02 -2.26243707e-03]
+ [-2.71414169e+00 -1.77001225e-01 -2.10464272e-01 -9.90265503e-02]
+ [-2.88899057e+00 -1.44949426e-01  1.79002563e-02 -1.99683897e-02]
+ [-2.74534286e+00 -3.18298979e-01  3.15593736e-02  7.55758166e-02]
+ [-2.72871654e+00  3.26754513e-01  9.00792406e-02  6.12585926e-02]]
+
+
+
+
+

Conversion into ONNX#

+

Let’s try to convert it and see what happens.

+
try:
+    to_onnx(dec, X.astype(numpy.float32))
+except Exception as e:
+    print(e)
+
+
+
Unable to find a shape calculator for type '<class '__main__.DecorrelateTransformer'>'.
+It usually means the pipeline being converted contains a
+transformer or a predictor with no corresponding converter
+implemented in sklearn-onnx. If the converted is implemented
+in another library, you need to register
+the converted so that it can be used by sklearn-onnx (function
+update_registered_converter). If the model is not yet covered
+by sklearn-onnx, you may raise an issue to
+https://github.com/onnx/sklearn-onnx/issues
+to get the converter implemented or even contribute to the
+project. If the model is a custom model, a new converter must
+be implemented. Examples can be found in the gallery.
+
+
+

This error means there is no converter associated +with DecorrelateTransformer. Let’s implement it. +It requires implementing the two following +functions, a shape calculator and a converter, +with the same signatures as below. +First the shape calculator: we retrieve the input type +and tell that the output type has the same type, +the same number of rows and a specific number of columns.

+
def decorrelate_transformer_shape_calculator(operator):
+    op = operator.raw_operator
+    input_type = operator.inputs[0].type.__class__
+    input_dim = operator.inputs[0].type.shape[0]
+    output_type = input_type([input_dim, op.pca_.components_.shape[1]])
+    operator.outputs[0].type = output_type
+
+
+

The converter. One thing we need to pay attention to +is the target opset. This information is important +to make sure that every node is defined following the +specifications of that opset.

+
def decorrelate_transformer_converter(scope, operator, container):
+    op = operator.raw_operator
+    opv = container.target_opset
+    out = operator.outputs
+
+    # We retrieve the unique input.
+    X = operator.inputs[0]
+
+    # We tell in ONNX language how to compute the unique output.
+    # op_version=opv tells which opset is requested
+    Y = OnnxSubEstimator(op.pca_, X, op_version=opv, output_names=out[:1])
+    Y.add_to(scope, container)
+
+
+

We need to let skl2onnx know about the new converter.

+
update_registered_converter(
+    DecorrelateTransformer, "SklearnDecorrelateTransformer",
+    decorrelate_transformer_shape_calculator,
+    decorrelate_transformer_converter)
+
+
+onx = to_onnx(dec, X.astype(numpy.float32))
+
+sess = InferenceSession(onx.SerializeToString())
+
+exp = dec.transform(X.astype(numpy.float32))
+got = sess.run(None, {'X': X.astype(numpy.float32)})[0]
+
+
+def diff(p1, p2):
+    p1 = p1.ravel()
+    p2 = p2.ravel()
+    d = numpy.abs(p2 - p1)
+    return d.max(), (d / numpy.abs(p1)).max()
+
+
+print(diff(exp, got))
+
+
+
(3.560125949597648e-07, 0.0003158352661960492)
+
+
+

Let’s check it works as well with double.

+
onx = to_onnx(dec, X.astype(numpy.float64))
+
+sess = InferenceSession(onx.SerializeToString())
+
+exp = dec.transform(X.astype(numpy.float64))
+got = sess.run(None, {'X': X.astype(numpy.float64)})[0]
+print(diff(exp, got))
+
+
+
(0.0, 0.0)
+
+
+

The differences are smaller with double as expected.

+
+
+

Final graph#

+
oinf = OnnxInference(onx)
+ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot kcustom converter wrapper

Total running time of the script: ( 0 minutes 0.499 seconds)

+ +

Gallery generated by Sphinx-Gallery

+ \ No newline at end of file
diff --git a/auto_tutorial/plot_lcustom_options.html b/auto_tutorial/plot_lcustom_options.html index b9a342aea..0d4e170e2 100644 --- a/auto_tutorial/plot_lcustom_options.html +++ b/auto_tutorial/plot_lcustom_options.html @@ -1,769 +1,635 @@
- A new converter with options — sklearn-onnx 1.11.2 documentation

A new converter with options#

-

Options are used to implement different conversion -for a same model. The options can be used to replace -an operator MatMul by the Gemm operator and compare the -processing time for both graph. Let’s see how to retrieve -the options within a converter.

-

Example Implement a new converter implements a converter -which uses operator MatMul. Option use_gemm is used to -replace MatMul by Gemm.

- -
-

Custom model#

-
from mlprodict.onnxrt import OnnxInference
-from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
-from pandas import DataFrame
-from skl2onnx.tutorial import measure_time
-import numpy
-from onnxruntime import InferenceSession
-from sklearn.base import TransformerMixin, BaseEstimator
-from sklearn.datasets import load_iris
-from skl2onnx import update_registered_converter
-from skl2onnx.common.data_types import guess_numpy_type
-from skl2onnx.algebra.onnx_ops import (
-    OnnxSub, OnnxMatMul, OnnxGemm)
-from skl2onnx import to_onnx
-
-
-class DecorrelateTransformer(TransformerMixin, BaseEstimator):
-    """
-    Decorrelates correlated gaussian features.
-
-    :param alpha: avoids non inversible matrices
-        by adding *alpha* identity matrix
-
-    *Attributes*
-
-    * `self.mean_`: average
-    * `self.coef_`: square root of the coveriance matrix
-    """
-
-    def __init__(self, alpha=0.):
-        BaseEstimator.__init__(self)
-        TransformerMixin.__init__(self)
-        self.alpha = alpha
-
-    def fit(self, X, y=None, sample_weights=None):
-        if sample_weights is not None:
-            raise NotImplementedError(
-                "sample_weights != None is not implemented.")
-        self.mean_ = numpy.mean(X, axis=0, keepdims=True)
-        X = X - self.mean_
-        V = X.T @ X / X.shape[0]
-        if self.alpha != 0:
-            V += numpy.identity(V.shape[0]) * self.alpha
-        L, P = numpy.linalg.eig(V)
-        Linv = L ** (-0.5)
-        diag = numpy.diag(Linv)
-        root = P @ diag @ P.transpose()
-        self.coef_ = root
-        return self
-
-    def transform(self, X):
-        return (X - self.mean_) @ self.coef_
-
-
-data = load_iris()
-X = data.data
-
-dec = DecorrelateTransformer()
-dec.fit(X)
-pred = dec.transform(X[:5])
-print(pred)
-
-
-

Out:

-
[[ 0.0167562   0.52111756 -1.24946737 -0.56194325]
- [-0.0727878  -0.80853732 -1.43841018 -0.37441392]
- [-0.69971891 -0.09950908 -1.2138161  -0.3499275 ]
- [-1.13063404 -0.13540568 -0.79087008 -0.73938966]
- [-0.35790036  0.91900236 -1.04034399 -0.6509266 ]]
-
-
-
-
-

Conversion into ONNX#

-

Let’s try to convert it and see what happens.

-
def decorrelate_transformer_shape_calculator(operator):
-    op = operator.raw_operator
-    input_type = operator.inputs[0].type.__class__
-    input_dim = operator.inputs[0].type.shape[0]
-    output_type = input_type([input_dim, op.coef_.shape[1]])
-    operator.outputs[0].type = output_type
-
-
-def decorrelate_transformer_converter(scope, operator, container):
-    op = operator.raw_operator
-    opv = container.target_opset
-    out = operator.outputs
-
-    X = operator.inputs[0]
-
-    dtype = guess_numpy_type(X.type)
-    options = container.get_options(op, dict(use_gemm=False))
-    use_gemm = options['use_gemm']
-    print('conversion: use_gemm=', use_gemm)
-
-    if use_gemm:
-        Y = OnnxGemm(X, op.coef_.astype(dtype),
-                     (- op.mean_ @ op.coef_).astype(dtype),
-                     op_version=opv, alpha=1., beta=1.,
-                     output_names=out[:1])
-    else:
-        Y = OnnxMatMul(
-            OnnxSub(X, op.mean_.astype(dtype), op_version=opv),
-            op.coef_.astype(dtype),
-            op_version=opv, output_names=out[:1])
-    Y.add_to(scope, container)
-
-
-

The registration needs to declare the options -supported by the converted.

-
update_registered_converter(
-    DecorrelateTransformer, "SklearnDecorrelateTransformer",
-    decorrelate_transformer_shape_calculator,
-    decorrelate_transformer_converter,
-    options={'use_gemm': [True, False]})
-
-
-onx = to_onnx(dec, X.astype(numpy.float32))
-
-sess = InferenceSession(onx.SerializeToString())
-
-exp = dec.transform(X.astype(numpy.float32))
-got = sess.run(None, {'X': X.astype(numpy.float32)})[0]
-
-
-def diff(p1, p2):
-    p1 = p1.ravel()
-    p2 = p2.ravel()
-    d = numpy.abs(p2 - p1)
-    return d.max(), (d / numpy.abs(p1)).max()
-
-
-print(diff(exp, got))
-
-
-

Out:

-
conversion: use_gemm= False
-(6.046576181972796e-07, 0.0002951417065241126)
-
-
-

We try the non default option, use_gemm: True.

-
onx2 = to_onnx(dec, X.astype(numpy.float32),
-               options={'use_gemm': True})
-
-sess2 = InferenceSession(onx2.SerializeToString())
-
-exp = dec.transform(X.astype(numpy.float32))
-got2 = sess2.run(None, {'X': X.astype(numpy.float32)})[0]
-
-print(diff(exp, got2))
-
-
-

Out:

-
conversion: use_gemm= True
-(2.01757041429218e-06, 0.0005483764980302357)
-
-
-

Visually.

-
oinf = OnnxInference(onx2)
-ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot lcustom options
-
-

Time comparison#

-

Let’s compare the two computation.

-
X32 = X.astype(numpy.float32)
-obs = []
-
-context = {'sess': sess, 'X32': X32}
-mt = measure_time(
-    "sess.run(None, {'X': X32})", context, div_by_number=True,
-    number=100, repeat=1000)
-mt['use_gemm'] = False
-obs.append(mt)
-
-context = {'sess2': sess2, 'X32': X32}
-mt2 = measure_time(
-    "sess2.run(None, {'X': X32})", context, div_by_number=True,
-    number=10, repeat=100)
-mt2['use_gemm'] = True
-obs.append(mt2)
-
-DataFrame(obs).T
-
-
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
01
average0.000020.000012
deviation0.0000030.000001
min_exec0.0000190.000012
max_exec0.0000570.000019
repeat1000100
number10010
use_gemmFalseTrue
-
-
-
-

Total running time of the script: ( 0 minutes 2.376 seconds)

- -

Gallery generated by Sphinx-Gallery

+ A new converter with options - sklearn-onnx 1.14.0 documentation

A new converter with options#

+

Options are used to implement different conversions +for the same model. Here they are used to replace +the operator MatMul with the Gemm operator and to compare the +processing time of both graphs. Let’s see how to retrieve +the options within a converter.

+

Example Implement a new converter implements a converter +which uses the operator MatMul. Option use_gemm is used to +replace MatMul with Gemm.
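The mechanism has two sides, both detailed below: the converter reads the option back from the container with a default value, and the caller selects a value at conversion time. A compressed sketch follows; the names are illustrative.

# Compressed sketch of the option mechanism (detailed below).
def my_converter(scope, operator, container):
    # The converter asks the container for its options, with defaults.
    options = container.get_options(operator.raw_operator,
                                    dict(use_gemm=False))
    use_gemm = options['use_gemm']
    ...


# Caller side: the option is selected at conversion time, for instance
# onx = to_onnx(model, X.astype(numpy.float32), options={'use_gemm': True})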

+
+

Custom model#

+
from mlprodict.onnxrt import OnnxInference
+from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+from pandas import DataFrame
+from skl2onnx.tutorial import measure_time
+import numpy
+from onnxruntime import InferenceSession
+from sklearn.base import TransformerMixin, BaseEstimator
+from sklearn.datasets import load_iris
+from skl2onnx import update_registered_converter
+from skl2onnx.common.data_types import guess_numpy_type
+from skl2onnx.algebra.onnx_ops import (
+    OnnxSub, OnnxMatMul, OnnxGemm)
+from skl2onnx import to_onnx
+
+
+class DecorrelateTransformer(TransformerMixin, BaseEstimator):
+    """
+    Decorrelates correlated Gaussian features.
+
+    :param alpha: avoids non-invertible matrices
+        by adding *alpha* times the identity matrix
+
+    *Attributes*
+
+    * `self.mean_`: average
+    * `self.coef_`: inverse square root of the covariance matrix
+    """
+
+    def __init__(self, alpha=0.):
+        BaseEstimator.__init__(self)
+        TransformerMixin.__init__(self)
+        self.alpha = alpha
+
+    def fit(self, X, y=None, sample_weights=None):
+        if sample_weights is not None:
+            raise NotImplementedError(
+                "sample_weights != None is not implemented.")
+        self.mean_ = numpy.mean(X, axis=0, keepdims=True)
+        X = X - self.mean_
+        V = X.T @ X / X.shape[0]
+        if self.alpha != 0:
+            V += numpy.identity(V.shape[0]) * self.alpha
+        L, P = numpy.linalg.eig(V)
+        Linv = L ** (-0.5)
+        diag = numpy.diag(Linv)
+        root = P @ diag @ P.transpose()
+        self.coef_ = root
+        return self
+
+    def transform(self, X):
+        return (X - self.mean_) @ self.coef_
+
+
+data = load_iris()
+X = data.data
+
+dec = DecorrelateTransformer()
+dec.fit(X)
+pred = dec.transform(X[:5])
+print(pred)
+
+
+
[[ 0.0167562   0.52111756 -1.24946737 -0.56194325]
+ [-0.0727878  -0.80853732 -1.43841018 -0.37441392]
+ [-0.69971891 -0.09950908 -1.2138161  -0.3499275 ]
+ [-1.13063404 -0.13540568 -0.79087008 -0.73938966]
+ [-0.35790036  0.91900236 -1.04034399 -0.6509266 ]]
+
+
+
+
+

Conversion into ONNX#

+

Let’s try to convert it and see what happens.

+
def decorrelate_transformer_shape_calculator(operator):
+    op = operator.raw_operator
+    input_type = operator.inputs[0].type.__class__
+    input_dim = operator.inputs[0].type.shape[0]
+    output_type = input_type([input_dim, op.coef_.shape[1]])
+    operator.outputs[0].type = output_type
+
+
+def decorrelate_transformer_converter(scope, operator, container):
+    op = operator.raw_operator
+    opv = container.target_opset
+    out = operator.outputs
+
+    X = operator.inputs[0]
+
+    dtype = guess_numpy_type(X.type)
+    options = container.get_options(op, dict(use_gemm=False))
+    use_gemm = options['use_gemm']
+    print('conversion: use_gemm=', use_gemm)
+
+    if use_gemm:
+        Y = OnnxGemm(X, op.coef_.astype(dtype),
+                     (- op.mean_ @ op.coef_).astype(dtype),
+                     op_version=opv, alpha=1., beta=1.,
+                     output_names=out[:1])
+    else:
+        Y = OnnxMatMul(
+            OnnxSub(X, op.mean_.astype(dtype), op_version=opv),
+            op.coef_.astype(dtype),
+            op_version=opv, output_names=out[:1])
+    Y.add_to(scope, container)
+
+
+

The registration needs to declare the options supported by the converter.

+
update_registered_converter(
+    DecorrelateTransformer, "SklearnDecorrelateTransformer",
+    decorrelate_transformer_shape_calculator,
+    decorrelate_transformer_converter,
+    options={'use_gemm': [True, False]})
+
+
+onx = to_onnx(dec, X.astype(numpy.float32))
+
+sess = InferenceSession(onx.SerializeToString())
+
+exp = dec.transform(X.astype(numpy.float32))
+got = sess.run(None, {'X': X.astype(numpy.float32)})[0]
+
+
+def diff(p1, p2):
+    p1 = p1.ravel()
+    p2 = p2.ravel()
+    d = numpy.abs(p2 - p1)
+    return d.max(), (d / numpy.abs(p1)).max()
+
+
+print(diff(exp, got))
+
+
+
conversion: use_gemm= False
+(6.04657619085458e-07, 0.0002951417065406967)
+
+
+

We now try the non-default option, use_gemm: True.

+
onx2 = to_onnx(dec, X.astype(numpy.float32),
+               options={'use_gemm': True})
+
+sess2 = InferenceSession(onx2.SerializeToString())
+
+exp = dec.transform(X.astype(numpy.float32))
+got2 = sess2.run(None, {'X': X.astype(numpy.float32)})[0]
+
+print(diff(exp, got2))
+
+
+
conversion: use_gemm= True
+(2.01757041717876e-06, 0.0005483764980468156)
+
+
+
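Passing an option that was not declared at registration time should be rejected by skl2onnx. A hedged sketch ('fuse' is a deliberately unknown option name; the exact exception type may vary):

try:
    to_onnx(dec, X.astype(numpy.float32), options={'fuse': True})
except Exception as e:
    # the conversion is expected to fail because 'fuse' was never declared
    print(type(e).__name__, e)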

Visually.

+
oinf = OnnxInference(onx2)
+ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot lcustom options
+
+

Time comparison#

+

Let’s compare the two computations.

+
X32 = X.astype(numpy.float32)
+obs = []
+
+context = {'sess': sess, 'X32': X32}
+mt = measure_time(
+    "sess.run(None, {'X': X32})", context, div_by_number=True,
+    number=100, repeat=1000)
+mt['use_gemm'] = False
+obs.append(mt)
+
+context = {'sess2': sess2, 'X32': X32}
+mt2 = measure_time(
+    "sess2.run(None, {'X': X32})", context, div_by_number=True,
+    number=10, repeat=100)
+mt2['use_gemm'] = True
+obs.append(mt2)
+
+DataFrame(obs).T
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
                     0          1
average       0.000017   0.000011
deviation     0.000003   0.000001
min_exec      0.000015    0.00001
max_exec      0.000041   0.000016
repeat            1000        100
number             100         10
use_gemm         False       True
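One likely reason the Gemm variant is faster is that it fuses the subtraction and the matrix product into a single node. A quick way to check (a small sketch reusing onx and onx2 defined above; the exact node lists may vary with the opset):

print("MatMul graph:", [n.op_type for n in onx.graph.node])
print("Gemm graph:  ", [n.op_type for n in onx2.graph.node])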
+
+
+
+

Total running time of the script: ( 0 minutes 1.917 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/auto_tutorial/plot_mcustom_parser.html b/auto_tutorial/plot_mcustom_parser.html index aeb486b63..c50e53368 100644 --- a/auto_tutorial/plot_mcustom_parser.html +++ b/auto_tutorial/plot_mcustom_parser.html @@ -1,713 +1,579 @@ - - - - - - - - - Change the number of outputs by adding a parser — sklearn-onnx 1.11.2 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - - - - - - - -
- - - - -
- -
- - -
- - - - -
- -
- - -
-

Change the number of outputs by adding a parser#

-

By default, sklearn-onnx assumes that a classifier -has two outputs (label and probabilities), a regressor -has one output (prediction), a transform has one output -(the transformed data). What if it is not the case? -The following example creates a custom converter -and a custom parser which defines the number of outputs -expected by the converted model.

-

Example A new converter with options shows a converter -which selects two ways to compute the same outputs. -In this one, the converter produces both. That would not -be a very efficient converter but that’s just for the sake -of using a parser. By default, a transformer only returns -one output but both are needed.

- -
-

A new transformer#

-
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
-from mlprodict.onnxrt import OnnxInference
-import numpy
-from onnxruntime import InferenceSession
-from sklearn.base import TransformerMixin, BaseEstimator
-from sklearn.datasets import load_iris
-from skl2onnx import update_registered_converter
-from skl2onnx.common.data_types import guess_numpy_type
-from skl2onnx.algebra.onnx_ops import (
-    OnnxSub, OnnxMatMul, OnnxGemm)
-from skl2onnx import to_onnx, get_model_alias
-
-
-class DecorrelateTransformer(TransformerMixin, BaseEstimator):
-    """
-    Decorrelates correlated gaussian features.
-
-    :param alpha: avoids non-invertible matrices
-        by adding *alpha* times the identity matrix
-
-    *Attributes*
-
-    * `self.mean_`: average
-    * `self.coef_`: square root of the covariance matrix
-    """
-
-    def __init__(self, alpha=0.):
-        BaseEstimator.__init__(self)
-        TransformerMixin.__init__(self)
-        self.alpha = alpha
-
-    def fit(self, X, y=None, sample_weights=None):
-        if sample_weights is not None:
-            raise NotImplementedError(
-                "sample_weights != None is not implemented.")
-        self.mean_ = numpy.mean(X, axis=0, keepdims=True)
-        X = X - self.mean_
-        V = X.T @ X / X.shape[0]
-        if self.alpha != 0:
-            V += numpy.identity(V.shape[0]) * self.alpha
-        L, P = numpy.linalg.eig(V)
-        Linv = L ** (-0.5)
-        diag = numpy.diag(Linv)
-        root = P @ diag @ P.transpose()
-        self.coef_ = root
-        return self
-
-    def transform(self, X):
-        return (X - self.mean_) @ self.coef_
-
-
-data = load_iris()
-X = data.data
-
-dec = DecorrelateTransformer()
-dec.fit(X)
-pred = dec.transform(X[:5])
-print(pred)
-
-
-

Out:

-
[[ 0.0167562   0.52111756 -1.24946737 -0.56194325]
- [-0.0727878  -0.80853732 -1.43841018 -0.37441392]
- [-0.69971891 -0.09950908 -1.2138161  -0.3499275 ]
- [-1.13063404 -0.13540568 -0.79087008 -0.73938966]
- [-0.35790036  0.91900236 -1.04034399 -0.6509266 ]]
-
-
-
-
-

Conversion into ONNX with two outputs#

-

Let’s try to convert it and see what happens.

-
def decorrelate_transformer_shape_calculator(operator):
-    op = operator.raw_operator
-    input_type = operator.inputs[0].type.__class__
-    input_dim = operator.inputs[0].type.shape[0]
-    output_type = input_type([input_dim, op.coef_.shape[1]])
-    operator.outputs[0].type = output_type
-
-
-def decorrelate_transformer_converter(scope, operator, container):
-    op = operator.raw_operator
-    opv = container.target_opset
-    out = operator.outputs
-
-    X = operator.inputs[0]
-
-    dtype = guess_numpy_type(X.type)
-
-    Y1 = OnnxMatMul(
-        OnnxSub(X, op.mean_.astype(dtype), op_version=opv),
-        op.coef_.astype(dtype),
-        op_version=opv, output_names=out[:1])
-
-    Y2 = OnnxGemm(X, op.coef_.astype(dtype),
-                  (- op.mean_ @ op.coef_).astype(dtype),
-                  op_version=opv, alpha=1., beta=1.,
-                  output_names=out[1:2])
-
-    Y1.add_to(scope, container)
-    Y2.add_to(scope, container)
-
-
-def decorrelate_transformer_parser(
-        scope, model, inputs, custom_parsers=None):
-    alias = get_model_alias(type(model))
-    this_operator = scope.declare_local_operator(alias, model)
-
-    # inputs
-    this_operator.inputs.append(inputs[0])
-
-    # outputs
-    cls_type = inputs[0].type.__class__
-    val_y1 = scope.declare_local_variable('nogemm', cls_type())
-    val_y2 = scope.declare_local_variable('gemm', cls_type())
-    this_operator.outputs.append(val_y1)
-    this_operator.outputs.append(val_y2)
-
-    # ends
-    return this_operator.outputs
-
-
-

The registration needs to declare the parser as well.

-
update_registered_converter(
-    DecorrelateTransformer, "SklearnDecorrelateTransformer",
-    decorrelate_transformer_shape_calculator,
-    decorrelate_transformer_converter,
-    parser=decorrelate_transformer_parser)
-
-
-

And conversion.

-
onx = to_onnx(dec, X.astype(numpy.float32),
-              target_opset=14)
-
-sess = InferenceSession(onx.SerializeToString())
-
-exp = dec.transform(X.astype(numpy.float32))
-results = sess.run(None, {'X': X.astype(numpy.float32)})
-y1 = results[0]
-y2 = results[1]
-
-
-def diff(p1, p2):
-    p1 = p1.ravel()
-    p2 = p2.ravel()
-    d = numpy.abs(p2 - p1)
-    return d.max(), (d / numpy.abs(p1)).max()
-
-
-print(diff(exp, y1))
-print(diff(exp, y2))
-
-
-

Out:

-
(6.046576181972796e-07, 0.0002951417065241126)
-(2.01757041429218e-06, 0.0005483764980302357)
-
-
-

It works. The resulting model looks like the following.

-
oinf = OnnxInference(onx, runtime="python_compiled")
-print(oinf)
-
-
-

Out:

-
OnnxInference(...)
-    def compiled_run(dict_inputs, yield_ops=None, context=None):
-        if yield_ops is not None:
-            raise NotImplementedError('yields_ops should be None.')
-        # init: Ge_Gemmcst1 (Ge_Gemmcst1)
-        # init: Ma_MatMulcst (Ma_MatMulcst)
-        # init: Su_Subcst (Su_Subcst)
-        # inputs
-        X = dict_inputs['X']
-        (Su_C0, ) = n0_sub(X, Su_Subcst)
-        (gemm, ) = n1_gemm(X, Ma_MatMulcst, Ge_Gemmcst1)
-        (nogemm, ) = n2_matmul(Su_C0, Ma_MatMulcst)
-        return {
-            'nogemm': nogemm,
-            'gemm': gemm,
-        }
-
-
-
-
-

Final graph#

-
ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot mcustom parser

Total running time of the script: ( 0 minutes 0.597 seconds)

- -

Gallery generated by Sphinx-Gallery

-
-
- - -
- - - - - -
- -
-
- - - - - - -
-
- + + + + + + + + + Change the number of outputs by adding a parser - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +

Change the number of outputs by adding a parser#

+

By default, sklearn-onnx assumes that a classifier +has two outputs (label and probabilities), a regressor +has one output (prediction), a transform has one output +(the transformed data). What if it is not the case? +The following example creates a custom converter +and a custom parser which defines the number of outputs +expected by the converted model.

+

Example A new converter with options shows a converter +which selects two ways to compute the same outputs. +In this one, the converter produces both. That would not +be a very efficient converter but that’s just for the sake +of using a parser. By default, a transformer only returns +one output but both are needed.

+
+

A new transformer#

+
from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+from mlprodict.onnxrt import OnnxInference
+import numpy
+from onnxruntime import InferenceSession
+from sklearn.base import TransformerMixin, BaseEstimator
+from sklearn.datasets import load_iris
+from skl2onnx import update_registered_converter
+from skl2onnx.common.data_types import guess_numpy_type
+from skl2onnx.algebra.onnx_ops import (
+    OnnxSub, OnnxMatMul, OnnxGemm)
+from skl2onnx import to_onnx, get_model_alias
+
+
+class DecorrelateTransformer(TransformerMixin, BaseEstimator):
+    """
+    Decorrelates correlated gaussian features.
+
+    :param alpha: avoids non-invertible matrices
+        by adding *alpha* times the identity matrix
+
+    *Attributes*
+
+    * `self.mean_`: average
+    * `self.coef_`: square root of the covariance matrix
+    """
+
+    def __init__(self, alpha=0.):
+        BaseEstimator.__init__(self)
+        TransformerMixin.__init__(self)
+        self.alpha = alpha
+
+    def fit(self, X, y=None, sample_weights=None):
+        if sample_weights is not None:
+            raise NotImplementedError(
+                "sample_weights != None is not implemented.")
+        self.mean_ = numpy.mean(X, axis=0, keepdims=True)
+        X = X - self.mean_
+        V = X.T @ X / X.shape[0]
+        if self.alpha != 0:
+            V += numpy.identity(V.shape[0]) * self.alpha
+        L, P = numpy.linalg.eig(V)
+        Linv = L ** (-0.5)
+        diag = numpy.diag(Linv)
+        root = P @ diag @ P.transpose()
+        self.coef_ = root
+        return self
+
+    def transform(self, X):
+        return (X - self.mean_) @ self.coef_
+
+
+data = load_iris()
+X = data.data
+
+dec = DecorrelateTransformer()
+dec.fit(X)
+pred = dec.transform(X[:5])
+print(pred)
+
+
+
[[ 0.0167562   0.52111756 -1.24946737 -0.56194325]
+ [-0.0727878  -0.80853732 -1.43841018 -0.37441392]
+ [-0.69971891 -0.09950908 -1.2138161  -0.3499275 ]
+ [-1.13063404 -0.13540568 -0.79087008 -0.73938966]
+ [-0.35790036  0.91900236 -1.04034399 -0.6509266 ]]
+
+
+
+
+

Conversion into ONNX with two outputs#

+

Let’s try to convert it and see what happens.

+
def decorrelate_transformer_shape_calculator(operator):
+    op = operator.raw_operator
+    input_type = operator.inputs[0].type.__class__
+    input_dim = operator.inputs[0].type.shape[0]
+    output_type = input_type([input_dim, op.coef_.shape[1]])
+    operator.outputs[0].type = output_type
+
+
+def decorrelate_transformer_converter(scope, operator, container):
+    op = operator.raw_operator
+    opv = container.target_opset
+    out = operator.outputs
+
+    X = operator.inputs[0]
+
+    dtype = guess_numpy_type(X.type)
+
+    Y1 = OnnxMatMul(
+        OnnxSub(X, op.mean_.astype(dtype), op_version=opv),
+        op.coef_.astype(dtype),
+        op_version=opv, output_names=out[:1])
+
+    Y2 = OnnxGemm(X, op.coef_.astype(dtype),
+                  (- op.mean_ @ op.coef_).astype(dtype),
+                  op_version=opv, alpha=1., beta=1.,
+                  output_names=out[1:2])
+
+    Y1.add_to(scope, container)
+    Y2.add_to(scope, container)
+
+
+def decorrelate_transformer_parser(
+        scope, model, inputs, custom_parsers=None):
+    alias = get_model_alias(type(model))
+    this_operator = scope.declare_local_operator(alias, model)
+
+    # inputs
+    this_operator.inputs.append(inputs[0])
+
+    # outputs
+    cls_type = inputs[0].type.__class__
+    val_y1 = scope.declare_local_variable('nogemm', cls_type())
+    val_y2 = scope.declare_local_variable('gemm', cls_type())
+    this_operator.outputs.append(val_y1)
+    this_operator.outputs.append(val_y2)
+
+    # ends
+    return this_operator.outputs
+
+
+

The registration needs to declare the parser as well.

+
update_registered_converter(
+    DecorrelateTransformer, "SklearnDecorrelateTransformer",
+    decorrelate_transformer_shape_calculator,
+    decorrelate_transformer_converter,
+    parser=decorrelate_transformer_parser)
+
+
+

And conversion.

+
onx = to_onnx(dec, X.astype(numpy.float32),
+              target_opset=14)
+
+sess = InferenceSession(onx.SerializeToString())
+
+exp = dec.transform(X.astype(numpy.float32))
+results = sess.run(None, {'X': X.astype(numpy.float32)})
+y1 = results[0]
+y2 = results[1]
+
+
+def diff(p1, p2):
+    p1 = p1.ravel()
+    p2 = p2.ravel()
+    d = numpy.abs(p2 - p1)
+    return d.max(), (d / numpy.abs(p1)).max()
+
+
+print(diff(exp, y1))
+print(diff(exp, y2))
+
+
+
(6.04657619085458e-07, 0.0002951417065406967)
+(2.01757041717876e-06, 0.0005483764980468156)
+
+
+
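To confirm the parser took effect, the outputs declared by the converted model can be inspected directly (a small sketch; the names should match the variables declared in the parser):

print([o.name for o in onx.graph.output])
# expected to list both outputs, e.g. ['nogemm', 'gemm']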

It works. The resulting model looks like the following.

+
oinf = OnnxInference(onx, runtime="python_compiled")
+print(oinf)
+
+
+
OnnxInference(...)
+    def compiled_run(dict_inputs, yield_ops=None, context=None, attributes=None):
+        if yield_ops is not None:
+            raise NotImplementedError('yields_ops should be None.')
+        # init: Ge_Gemmcst1 (Ge_Gemmcst1)
+        # init: Ma_MatMulcst (Ma_MatMulcst)
+        # init: Su_Subcst (Su_Subcst)
+        # inputs
+        X = dict_inputs['X']
+        (Su_C0, ) = n0_sub(X, Su_Subcst)
+        (nogemm, ) = n1_matmul(Su_C0, Ma_MatMulcst)
+        (gemm, ) = n2_gemm(X, Ma_MatMulcst, Ge_Gemmcst1)
+        return {
+            'nogemm': nogemm,
+            'gemm': gemm,
+        }
+
+
+
+
+

Final graph#

+
ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot mcustom parser

Total running time of the script: ( 0 minutes 0.255 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/auto_tutorial/plot_ngrams.html b/auto_tutorial/plot_ngrams.html new file mode 100644 index 000000000..a3bb8d19c --- /dev/null +++ b/auto_tutorial/plot_ngrams.html @@ -0,0 +1,491 @@ + + + + + + + + + Tricky issue when converting CountVectorizer or TfidfVectorizer - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +

Tricky issue when converting CountVectorizer or TfidfVectorizer#

+

This issue is described at scikit-learn/issues/13733. If a CountVectorizer or a TfidfVectorizer produces a token containing a space, skl2onnx cannot know whether it is a bi-gram or a unigram with a space.
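A tiny illustration of the ambiguity (hypothetical strings, not taken from a fitted model): once a bi-gram is stored as a single string joined with a space, it can no longer be distinguished from a different split that produces the same characters.

bigram = ("is the ", "second ")      # two tokens, the first one ends with a space
joined = " ".join(bigram)            # how the vocabulary stores the bi-gram
print(repr(joined))                  # 'is the  second '
print(joined == "is the  second ")   # True: the original split cannot be recovered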

+
+

A simple example impossible to convert#

+
import pprint
+import numpy
+from numpy.testing import assert_almost_equal
+from onnxruntime import InferenceSession
+from sklearn.feature_extraction.text import TfidfVectorizer
+from skl2onnx import to_onnx
+from skl2onnx.sklapi import TraceableTfidfVectorizer
+import skl2onnx.sklapi.register  # noqa
+
+corpus = numpy.array([
+    "This is the first document.",
+    "This document is the second document.",
+    "Is this the first document?",
+    "",
+]).reshape((4, ))
+
+pattern = r"\b[a-z ]{1,10}\b"
+mod1 = TfidfVectorizer(ngram_range=(1, 2),
+                       token_pattern=pattern)
+mod1.fit(corpus)
+
+
+
+
TfidfVectorizer(ngram_range=(1, 2), token_pattern='\\b[a-z ]{1,10}\\b')
+
+
+

Unigrams and bi-grams are placed into the following container, which maps each of them to its column index.

+ +
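The vocabulary below is obtained by printing the fitted attribute; the corresponding code block seems to have been dropped from the rendered page, so here is the one-line sketch that produces it:

pprint.pprint(mod1.vocabulary_)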
{'document': 0,
+ 'document ': 1,
+ 'document  is the ': 2,
+ 'is the ': 3,
+ 'is the  second ': 4,
+ 'is this ': 5,
+ 'is this  the first ': 6,
+ 'second ': 7,
+ 'second  document': 8,
+ 'the first ': 9,
+ 'the first  document': 10,
+ 'this ': 11,
+ 'this  document ': 12,
+ 'this is ': 13,
+ 'this is  the first ': 14}
+
+
+

Conversion.

+
try:
+    to_onnx(mod1, corpus)
+except RuntimeError as e:
+    print(e)
+
+
+
There were ambiguities between n-grams and tokens. 2 errors occurred. You can fix it by using class TraceableTfidfVectorizer.
+You can learn more at https://github.com/scikit-learn/scikit-learn/issues/13733.
+Unable to split n-grams 'is this  the first ' into tokens ('is', 'this', 'the', 'first ') existing in the vocabulary. Token 'is' does not exist in the vocabulary..
+Unable to split n-grams 'this is  the first ' into tokens ('this', 'is', 'the', 'first ') existing in the vocabulary. Token 'this' does not exist in the vocabulary..
+
+
+
+
+

TraceableTfidfVectorizer#

+

Class TraceableTfidfVectorizer is equivalent to sklearn.feature_extraction.text.TfidfVectorizer but stores the unigrams and bi-grams of the vocabulary as tuples instead of concatenating every piece into a single string.

+
mod2 = TraceableTfidfVectorizer(
+    ngram_range=(1, 2), token_pattern=pattern)
+mod2.fit(corpus)
+
+pprint.pprint(mod2.vocabulary_)
+
+
+
{('document',): 0,
+ ('document ',): 1,
+ ('document ', 'is the '): 2,
+ ('is the ',): 3,
+ ('is the ', 'second '): 4,
+ ('is this ',): 5,
+ ('is this ', 'the first '): 6,
+ ('second ',): 7,
+ ('second ', 'document'): 8,
+ ('the first ',): 9,
+ ('the first ', 'document'): 10,
+ ('this ',): 11,
+ ('this ', 'document '): 12,
+ ('this is ',): 13,
+ ('this is ', 'the first '): 14}
+
+
+

Let’s check it produces the same results.

+
assert_almost_equal(mod1.transform(corpus).todense(),
+                    mod2.transform(corpus).todense())
+
+
+

Conversion. Line import skl2onnx.sklapi.register was added to register the converters associated with these new classes. By default, only converters for scikit-learn are declared.

+
onx = to_onnx(mod2, corpus)
+sess = InferenceSession(onx.SerializeToString())
+got = sess.run(None, {'X': corpus})
+
+
+

Let’s check if there are discrepancies…

+
assert_almost_equal(mod2.transform(corpus).todense(), got[0])
+
+
+

Total running time of the script: ( 0 minutes 0.047 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+ +
+ +
+
+ + + + + \ No newline at end of file diff --git a/auto_tutorial/plot_pextend_python_runtime.html b/auto_tutorial/plot_pextend_python_runtime.html index b899c0e6c..9302f1e5c 100644 --- a/auto_tutorial/plot_pextend_python_runtime.html +++ b/auto_tutorial/plot_pextend_python_runtime.html @@ -1,928 +1,747 @@ - - - - - - - - - Fast design with a python runtime — sklearn-onnx 1.11.2 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - - - - - - - - - - - - -
- -
- - -
-

Fast design with a python runtime#

-

ONNX operators do not contain all operators -from numpy. There is no operator for -solve but this one -is needed to implement the prediction function -of model NMF. The converter can be written -including a new ONNX operator but then it requires a -runtime for it to be tested. This example shows how -to do that with the python runtime implemented in -mlprodict. It may not be onnxruntime -but that speeds up the implementation of the converter.

-

The example changes the transformer from -Implement a new converter, the method predict -decorrelates the variables by computing the eigen -values. Method fit does not do anything anymore.

- -
-

A transformer which decorrelates variables#

-

This time, the eigen values are not estimated at -training time but at prediction time.

-
from mlprodict.onnxrt.shape_object import ShapeObject
-from mlprodict.onnxrt.ops_cpu import OpRunCustom, register_operator
-from skl2onnx.algebra.onnx_ops import (
-    OnnxAdd,
-    OnnxCast,
-    OnnxDiv,
-    OnnxGatherElements,
-    OnnxEyeLike,
-    OnnxMatMul,
-    OnnxMul,
-    OnnxPow,
-    OnnxReduceMean,
-    OnnxShape,
-    OnnxSub,
-    OnnxTranspose,
-)
-from skl2onnx.algebra import OnnxOperator
-from mlprodict.onnxrt import OnnxInference
-from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
-import pickle
-from io import BytesIO
-import numpy
-from numpy.testing import assert_almost_equal
-from sklearn.base import TransformerMixin, BaseEstimator
-from sklearn.datasets import load_iris
-from skl2onnx.common.data_types import guess_numpy_type, guess_proto_type
-from skl2onnx import to_onnx
-from skl2onnx import update_registered_converter
-
-
-class LiveDecorrelateTransformer(TransformerMixin, BaseEstimator):
-    """
-    Decorrelates correlated gaussian features.
-
-    :param alpha: avoids non-invertible matrices
-        by adding *alpha* times the identity matrix
-
-    *Attributes*
-
-    * `self.nf_`: number of expected features
-    """
-
-    def __init__(self, alpha=0.):
-        BaseEstimator.__init__(self)
-        TransformerMixin.__init__(self)
-        self.alpha = alpha
-
-    def fit(self, X, y=None, sample_weights=None):
-        if sample_weights is not None:
-            raise NotImplementedError(
-                "sample_weights != None is not implemented.")
-        self.nf_ = X.shape[1]
-        return self
-
-    def transform(self, X):
-        mean_ = numpy.mean(X, axis=0, keepdims=True)
-        X2 = X - mean_
-        V = X2.T @ X2 / X2.shape[0]
-        if self.alpha != 0:
-            V += numpy.identity(V.shape[0]) * self.alpha
-        L, P = numpy.linalg.eig(V)
-        Linv = L ** (-0.5)
-        diag = numpy.diag(Linv)
-        root = P @ diag @ P.transpose()
-        coef_ = root
-        return (X - mean_) @ coef_
-
-
-def test_live_decorrelate_transformer():
-    data = load_iris()
-    X = data.data
-
-    dec = LiveDecorrelateTransformer()
-    dec.fit(X)
-    pred = dec.transform(X)
-    cov = pred.T @ pred
-    cov /= cov[0, 0]
-    assert_almost_equal(numpy.identity(4), cov)
-
-    dec = LiveDecorrelateTransformer(alpha=1e-10)
-    dec.fit(X)
-    pred = dec.transform(X)
-    cov = pred.T @ pred
-    cov /= cov[0, 0]
-    assert_almost_equal(numpy.identity(4), cov)
-
-    st = BytesIO()
-    pickle.dump(dec, st)
-    dec2 = pickle.load(BytesIO(st.getvalue()))
-    assert_almost_equal(dec.transform(X), dec2.transform(X))
-
-
-test_live_decorrelate_transformer()
-
-
-

Everything works as expected.

-
-
-

Extend ONNX#

-

The conversion requires one operator to compute -the eigen values and vectors. The list of -ONNX operators does not contain anything -which produces eigen values. It does not seem -efficient to implement an algorithm with existing -ONNX operators to find eigen values. -A new operator must be -added, we give it the same name Eig as in numpy. -It would take a matrix and would produce one or two outputs, -the eigen values and the eigen vectors. -Just for the exercise, a parameter specifies -to output the eigen vectors as a second output.

-
-

New ONNX operator#

-

Any unknown operator can be -added to an ONNX graph. Operators are grouped by domain, -‘’ or ai.onnx refers to matrix computation. -ai.onnx.ml refers to usual machine learning models. -New domains are officially supported by onnx package. -We want to create a new operator Eig of domain onnxcustom. -It must be declared in a class, then a converter can use it.

-
class OnnxEig(OnnxOperator):
-    """
-    Defines a custom operator not defined by ONNX
-    specifications but in onnxruntime.
-    """
-
-    since_version = 1  # last changed in this version
-    expected_inputs = [('X', 'T')]  # input names and types
-    expected_outputs = [('EigenValues', 'T'),  # output names and types
-                        ('EigenVectors', 'T')]
-    input_range = [1, 1]  # only one input is allowed
-    output_range = [1, 2]  # 1 or 2 outputs are produced
-    is_deprecated = False  # obviously not deprecated
-    domain = 'onnxcustom'  # domain, anything is ok
-    operator_name = 'Eig'  # operator name
-    past_version = {}  # empty as it is the first version
-
-    def __init__(self, X, eigv=False, op_version=None, **kwargs):
-        """
-        :param X: array or OnnxOperatorMixin
-        :param eigv: also produces the eigen vectors
-        :param op_version: opset version
-        :param kwargs: additional parameters
-        """
-        OnnxOperator.__init__(
-            self, X, eigv=eigv, op_version=op_version, **kwargs)
-
-
-print(OnnxEig('X', eigv=True))
-
-
-

Out:

-
OnnxEig(1 in) -> ?
-
-
-

Now we can write the converter and -the shape calculator.

-
-
-

shape calculator#

-

Nothing new here.

-
def live_decorrelate_transformer_shape_calculator(operator):
-    op = operator.raw_operator
-    input_type = operator.inputs[0].type.__class__
-    input_dim = operator.inputs[0].type.shape[0]
-    output_type = input_type([input_dim, op.nf_])
-    operator.outputs[0].type = output_type
-
-
-
-
-

converter#

-

The converter is using the class OnnxEig. The code -is longer than previous converters as the computation is -more complex too.

-
def live_decorrelate_transformer_converter(scope, operator, container):
-    # shortcuts
-    op = operator.raw_operator
-    opv = container.target_opset
-    out = operator.outputs
-
-    # We retrieve the unique input.
-    X = operator.inputs[0]
-
-    # We guess its type. If the operator ingests float (or double),
-    # it outputs float (or double).
-    proto_dtype = guess_proto_type(X.type)
-    dtype = guess_numpy_type(X.type)
-
-    # Lines in comment specify the numpy computation
-    # the ONNX code implements.
-    # mean_ = numpy.mean(X, axis=0, keepdims=True)
-    mean = OnnxReduceMean(X, axes=[0], keepdims=1, op_version=opv)
-
-    # This is a trick I often use. The converter automatically
-    # chooses a name for every output. In a big graph,
-    # it is difficult to know which operator is producing which output.
-    # This line just tells every node to prefix its outputs with this string.
-    # It also applies to all input nodes unless this method
-    # was called for one of these nodes.
-    mean.set_onnx_name_prefix('mean')
-
-    # X2 = X - mean_
-    X2 = OnnxSub(X, mean, op_version=opv)
-
-    # V = X2.T @ X2 / X2.shape[0]
-    N = OnnxGatherElements(
-        OnnxShape(X, op_version=opv),
-        numpy.array([0], dtype=numpy.int64),
-        op_version=opv)
-    Nf = OnnxCast(N, to=proto_dtype, op_version=opv)
-
-    # Every output involved in N and Nf is prefixed by 'N'.
-    Nf.set_onnx_name_prefix('N')
-
-    V = OnnxDiv(
-        OnnxMatMul(OnnxTranspose(X2, op_version=opv),
-                   X2, op_version=opv),
-        Nf, op_version=opv)
-    V.set_onnx_name_prefix('V1')
-
-    # V += numpy.identity(V.shape[0]) * self.alpha
-    V = OnnxAdd(V,
-                op.alpha * numpy.identity(op.nf_, dtype=dtype),
-                op_version=opv)
-    V.set_onnx_name_prefix('V2')
-
-    # L, P = numpy.linalg.eig(V)
-    LP = OnnxEig(V, eigv=True, op_version=opv)
-    LP.set_onnx_name_prefix('LP')
-
-    # Linv = L ** (-0.5)
-    # Notation LP[0] means OnnxPow is taking the first output
-    # of operator OnnxEig, LP[1] would mean the second one
-    # LP is not allowed as it is ambiguous
-    Linv = OnnxPow(LP[0], numpy.array([-0.5], dtype=dtype),
-                   op_version=opv)
-    Linv.set_onnx_name_prefix('Linv')
-
-    # diag = numpy.diag(Linv)
-    diag = OnnxMul(
-        OnnxEyeLike(
-            numpy.zeros((op.nf_, op.nf_), dtype=numpy.int64),
-            k=0, op_version=opv),
-        Linv, op_version=opv)
-    diag.set_onnx_name_prefix('diag')
-
-    # root = P @ diag @ P.transpose()
-    trv = OnnxTranspose(LP[1], op_version=opv)
-    coef_left = OnnxMatMul(LP[1], diag, op_version=opv)
-    coef_left.set_onnx_name_prefix('coef_left')
-    coef = OnnxMatMul(coef_left, trv, op_version=opv)
-    coef.set_onnx_name_prefix('coef')
-
-    # Same part as before.
-    Y = OnnxMatMul(X2, coef, op_version=opv, output_names=out[:1])
-    Y.set_onnx_name_prefix('Y')
-
-    # The last line specifies the final output.
-    # Every node involved in the computation is added to the ONNX
-    # graph at this stage.
-    Y.add_to(scope, container)
-
-
-
-
-

Runtime for Eig#

-

Here comes the new part. The python runtime does not -implement any runtime for Eig. We need to tell the runtime -to compute eigen values and vectors every time operator Eig -is called. That means implementing two methods, -one to compute, one to infer the shape of the results. -The first one is mandatory, the second one can return an -empty shape if it depends on the inputs. If it is known, -the runtime may be able to optimize the computation, -by reducing allocation for example.

-
class OpEig(OpRunCustom):
-
-    op_name = 'Eig'  # operator name
-    atts = {'eigv': True}  # operator parameters
-
-    def __init__(self, onnx_node, desc=None, **options):
-        # constructor, every parameter is added a member
-        OpRunCustom.__init__(self, onnx_node, desc=desc,
-                             expected_attributes=OpEig.atts,
-                             **options)
-
-    def run(self, x, **kwargs):
-        # computation
-        if self.eigv:
-            return numpy.linalg.eig(x)
-        return (numpy.linalg.eigvals(x), )
-
-    def infer_shapes(self, x):
-        # shape inference, if you don't know what to
-        # write, just return `ShapeObject(None)`
-        if self.eigv:
-            return (
-                ShapeObject(
-                    x.shape, dtype=x.dtype,
-                    name=self.__class__.__name__ + 'Values'),
-                ShapeObject(
-                    x.shape, dtype=x.dtype,
-                    name=self.__class__.__name__ + 'Vectors'))
-        return (ShapeObject(x.shape, dtype=x.dtype,
-                            name=self.__class__.__name__), )
-
-
-
-
-

Registration#

-
update_registered_converter(
-    LiveDecorrelateTransformer, "SklearnLiveDecorrelateTransformer",
-    live_decorrelate_transformer_shape_calculator,
-    live_decorrelate_transformer_converter)
-
-
-
-
-
-

Final example#

-
data = load_iris()
-X = data.data
-
-dec = LiveDecorrelateTransformer()
-dec.fit(X)
-
-onx = to_onnx(dec, X.astype(numpy.float32))
-
-register_operator(OpEig, name='Eig', overwrite=False)
-
-oinf = OnnxInference(onx)
-
-exp = dec.transform(X.astype(numpy.float32))
-got = oinf.run({'X': X.astype(numpy.float32)})['variable']
-
-
-def diff(p1, p2):
-    p1 = p1.ravel()
-    p2 = p2.ravel()
-    d = numpy.abs(p2 - p1)
-    return d.max(), (d / numpy.abs(p1)).max()
-
-
-print(diff(exp, got))
-
-
-

Out:

-
(0.0, 0.0)
-
-
-

It works!

-
-
-

Final graph#

-
oinf = OnnxInference(onx)
-ax = plot_graphviz(oinf.to_dot())
-ax.get_xaxis().set_visible(False)
-ax.get_yaxis().set_visible(False)
-
-
-plot pextend python runtime

Total running time of the script: ( 0 minutes 0.441 seconds)

- -

Gallery generated by Sphinx-Gallery

-
-
- - -
- - - - - -
- -
-
- - - - - - -
-
- + + + + + + + + + Fast design with a python runtime - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +

Fast design with a python runtime#

+

The ONNX operator set does not contain all operators from numpy. There is no operator for solve, yet it is needed to implement the prediction function of model NMF. The converter can be written with a new ONNX operator, but then a runtime is required to test it. This example shows how to do that with the python runtime implemented in mlprodict. It is not onnxruntime, but it speeds up the implementation of the converter.

+

The example changes the transformer from Implement a new converter: the method transform decorrelates the variables by computing the eigen values, and the method fit does not do anything anymore.

+
+

A transformer which decorrelates variables#

+

This time, the eigen values are not estimated at +training time but at prediction time.

+
from mlprodict.onnxrt.ops_cpu import OpRunCustom, register_operator
+from skl2onnx.algebra.onnx_ops import (
+    OnnxAdd,
+    OnnxCast,
+    OnnxDiv,
+    OnnxGatherElements,
+    OnnxEyeLike,
+    OnnxMatMul,
+    OnnxMul,
+    OnnxPow,
+    OnnxReduceMean_13,
+    OnnxShape,
+    OnnxSub,
+    OnnxTranspose,
+)
+from skl2onnx.algebra import OnnxOperator
+from mlprodict.onnxrt import OnnxInference
+from pyquickhelper.helpgen.graphviz_helper import plot_graphviz
+import pickle
+from io import BytesIO
+import numpy
+from numpy.testing import assert_almost_equal
+from sklearn.base import TransformerMixin, BaseEstimator
+from sklearn.datasets import load_iris
+from skl2onnx.common.data_types import guess_numpy_type, guess_proto_type
+from skl2onnx import to_onnx
+from skl2onnx import update_registered_converter
+
+
+class LiveDecorrelateTransformer(TransformerMixin, BaseEstimator):
+    """
+    Decorrelates correlated gaussian features.
+
+    :param alpha: avoids non-invertible matrices
+        by adding *alpha* times the identity matrix
+
+    *Attributes*
+
+    * `self.nf_`: number of expected features
+    """
+
+    def __init__(self, alpha=0.):
+        BaseEstimator.__init__(self)
+        TransformerMixin.__init__(self)
+        self.alpha = alpha
+
+    def fit(self, X, y=None, sample_weights=None):
+        if sample_weights is not None:
+            raise NotImplementedError(
+                "sample_weights != None is not implemented.")
+        self.nf_ = X.shape[1]
+        return self
+
+    def transform(self, X):
+        mean_ = numpy.mean(X, axis=0, keepdims=True)
+        X2 = X - mean_
+        V = X2.T @ X2 / X2.shape[0]
+        if self.alpha != 0:
+            V += numpy.identity(V.shape[0]) * self.alpha
+        L, P = numpy.linalg.eig(V)
+        Linv = L ** (-0.5)
+        diag = numpy.diag(Linv)
+        root = P @ diag @ P.transpose()
+        coef_ = root
+        return (X - mean_) @ coef_
+
+
+def test_live_decorrelate_transformer():
+    data = load_iris()
+    X = data.data
+
+    dec = LiveDecorrelateTransformer()
+    dec.fit(X)
+    pred = dec.transform(X)
+    cov = pred.T @ pred
+    cov /= cov[0, 0]
+    assert_almost_equal(numpy.identity(4), cov)
+
+    dec = LiveDecorrelateTransformer(alpha=1e-10)
+    dec.fit(X)
+    pred = dec.transform(X)
+    cov = pred.T @ pred
+    cov /= cov[0, 0]
+    assert_almost_equal(numpy.identity(4), cov)
+
+    st = BytesIO()
+    pickle.dump(dec, st)
+    dec2 = pickle.load(BytesIO(st.getvalue()))
+    assert_almost_equal(dec.transform(X), dec2.transform(X))
+
+
+test_live_decorrelate_transformer()
+
+
+

Everything works as expected.

+
+
+

Extend ONNX#

+

The conversion requires one operator to compute +the eigen values and vectors. The list of +ONNX operators does not contain anything +which produces eigen values. It does not seem +efficient to implement an algorithm with existing +ONNX operators to find eigen values. +A new operator must be +added, we give it the same name Eig as in numpy. +It would take a matrix and would produce one or two outputs, +the eigen values and the eigen vectors. +Just for the exercise, a parameter specifies +to output the eigen vectors as a second output.
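As a reminder of what the new operator is expected to compute, here is the numpy equivalent (purely illustrative):

import numpy

V = numpy.array([[2., 1.], [1., 2.]])
eigenvalues, eigenvectors = numpy.linalg.eig(V)
print(eigenvalues)    # first output of Eig
print(eigenvectors)   # optional second output, returned when eigv=True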

+
+

New ONNX operator#

+

Any unknown operator can be +added to an ONNX graph. Operators are grouped by domain, +‘’ or ai.onnx refers to matrix computation. +ai.onnx.ml refers to usual machine learning models. +New domains are officially supported by onnx package. +We want to create a new operator Eig of domain onnxcustom. +It must be declared in a class, then a converter can use it.

+
class OnnxEig(OnnxOperator):
+    """
+    Defines a custom operator not defined by ONNX
+    specifications but in onnxruntime.
+    """
+
+    since_version = 1  # last changed in this version
+    expected_inputs = [('X', 'T')]  # input names and types
+    expected_outputs = [('EigenValues', 'T'),  # output names and types
+                        ('EigenVectors', 'T')]
+    input_range = [1, 1]  # only one input is allowed
+    output_range = [1, 2]  # 1 or 2 outputs are produced
+    is_deprecated = False  # obviously not deprecated
+    domain = 'onnxcustom'  # domain, anything is ok
+    operator_name = 'Eig'  # operator name
+    past_version = {}  # empty as it is the first version
+
+    def __init__(self, X, eigv=False, op_version=None, **kwargs):
+        """
+        :param X: array or OnnxOperatorMixin
+        :param eigv: also produces the eigen vectors
+        :param op_version: opset version
+        :param kwargs: additional parameters
+        """
+        OnnxOperator.__init__(
+            self, X, eigv=eigv, op_version=op_version, **kwargs)
+
+
+print(OnnxEig('X', eigv=True))
+
+
+
OnnxEig(1 in) -> ?
+
+
+

Now we can write the converter and +the shape calculator.

+
+
+

shape calculator#

+

Nothing new here.

+
def live_decorrelate_transformer_shape_calculator(operator):
+    op = operator.raw_operator
+    input_type = operator.inputs[0].type.__class__
+    input_dim = operator.inputs[0].type.shape[0]
+    output_type = input_type([input_dim, op.nf_])
+    operator.outputs[0].type = output_type
+
+
+
+
+

converter#

+

The converter is using the class OnnxEig. The code +is longer than previous converters as the computation is +more complex too.

+
def live_decorrelate_transformer_converter(scope, operator, container):
+    # shortcuts
+    op = operator.raw_operator
+    opv = container.target_opset
+    out = operator.outputs
+
+    # We retrieve the unique input.
+    X = operator.inputs[0]
+
+    # We guess its type. If the operator ingests float (or double),
+    # it outputs float (or double).
+    proto_dtype = guess_proto_type(X.type)
+    dtype = guess_numpy_type(X.type)
+
+    # Lines in comment specify the numpy computation
+    # the ONNX code implements.
+    # mean_ = numpy.mean(X, axis=0, keepdims=True)
+    mean = OnnxReduceMean_13(X, axes=[0], keepdims=1, op_version=opv)
+
+    # This is a trick I often use. The converter automatically
+    # chooses a name for every output. In a big graph,
+    # it is difficult to know which operator is producing which output.
+    # This line just tells every node to prefix its outputs with this string.
+    # It also applies to all input nodes unless this method
+    # was called for one of these nodes.
+    mean.set_onnx_name_prefix('mean')
+
+    # X2 = X - mean_
+    X2 = OnnxSub(X, mean, op_version=opv)
+
+    # V = X2.T @ X2 / X2.shape[0]
+    N = OnnxGatherElements(
+        OnnxShape(X, op_version=opv),
+        numpy.array([0], dtype=numpy.int64),
+        op_version=opv)
+    Nf = OnnxCast(N, to=proto_dtype, op_version=opv)
+
+    # Every output involved in N and Nf is prefixed by 'N'.
+    Nf.set_onnx_name_prefix('N')
+
+    V = OnnxDiv(
+        OnnxMatMul(OnnxTranspose(X2, op_version=opv),
+                   X2, op_version=opv),
+        Nf, op_version=opv)
+    V.set_onnx_name_prefix('V1')
+
+    # V += numpy.identity(V.shape[0]) * self.alpha
+    V = OnnxAdd(V,
+                op.alpha * numpy.identity(op.nf_, dtype=dtype),
+                op_version=opv)
+    V.set_onnx_name_prefix('V2')
+
+    # L, P = numpy.linalg.eig(V)
+    LP = OnnxEig(V, eigv=True, op_version=opv)
+    LP.set_onnx_name_prefix('LP')
+
+    # Linv = L ** (-0.5)
+    # Notation LP[0] means OnnxPow is taking the first output
+    # of operator OnnxEig, LP[1] would mean the second one
+    # LP is not allowed as it is ambiguous
+    Linv = OnnxPow(LP[0], numpy.array([-0.5], dtype=dtype),
+                   op_version=opv)
+    Linv.set_onnx_name_prefix('Linv')
+
+    # diag = numpy.diag(Linv)
+    diag = OnnxMul(
+        OnnxEyeLike(
+            numpy.zeros((op.nf_, op.nf_), dtype=numpy.int64),
+            k=0, op_version=opv),
+        Linv, op_version=opv)
+    diag.set_onnx_name_prefix('diag')
+
+    # root = P @ diag @ P.transpose()
+    trv = OnnxTranspose(LP[1], op_version=opv)
+    coef_left = OnnxMatMul(LP[1], diag, op_version=opv)
+    coef_left.set_onnx_name_prefix('coef_left')
+    coef = OnnxMatMul(coef_left, trv, op_version=opv)
+    coef.set_onnx_name_prefix('coef')
+
+    # Same part as before.
+    Y = OnnxMatMul(X2, coef, op_version=opv, output_names=out[:1])
+    Y.set_onnx_name_prefix('Y')
+
+    # The last line specifies the final output.
+    # Every node involved in the computation is added to the ONNX
+    # graph at this stage.
+    Y.add_to(scope, container)
+
+
+
+
+

Runtime for Eig#

+

Here comes the new part. The python runtime does not implement any runtime for Eig. We need to tell the runtime to compute eigen values and vectors every time operator Eig is called. That means implementing at least a run method which performs the computation; a second method inferring the shape of the results may also be provided, and if the output shape is known, the runtime may be able to optimize the computation, by reducing allocations for example. Only run is implemented below.

+
class OpEig(OpRunCustom):
+
+    op_name = 'Eig'  # operator name
+    atts = {'eigv': True}  # operator parameters
+
+    def __init__(self, onnx_node, desc=None, **options):
+        # constructor, every parameter is added a member
+        OpRunCustom.__init__(self, onnx_node, desc=desc,
+                             expected_attributes=OpEig.atts,
+                             **options)
+
+    def run(self, x, **kwargs):
+        # computation
+        if self.eigv:
+            return numpy.linalg.eig(x)
+        return (numpy.linalg.eigvals(x), )
+
+
+
+
+

Registration#

+
update_registered_converter(
+    LiveDecorrelateTransformer, "SklearnLiveDecorrelateTransformer",
+    live_decorrelate_transformer_shape_calculator,
+    live_decorrelate_transformer_converter)
+
+
+
+
+
+

Final example#

+
data = load_iris()
+X = data.data
+
+dec = LiveDecorrelateTransformer()
+dec.fit(X)
+
+onx = to_onnx(dec, X.astype(numpy.float32), target_opset=17)
+
+register_operator(OpEig, name='Eig', overwrite=False)
+
+oinf = OnnxInference(onx)
+
+exp = dec.transform(X.astype(numpy.float32))
+got = oinf.run({'X': X.astype(numpy.float32)})['variable']
+
+
+def diff(p1, p2):
+    p1 = p1.ravel()
+    p2 = p2.ravel()
+    d = numpy.abs(p2 - p1)
+    return d.max(), (d / numpy.abs(p1)).max()
+
+
+print(diff(exp, got))
+
+
+
(0.0, 0.0)
+
+
+

It works!

+
+
+

Final graph#

+
oinf = OnnxInference(onx)
+ax = plot_graphviz(oinf.to_dot())
+ax.get_xaxis().set_visible(False)
+ax.get_yaxis().set_visible(False)
+
+
+plot pextend python runtime

Total running time of the script: ( 0 minutes 0.229 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/auto_tutorial/plot_qextend_onnxruntime.html b/auto_tutorial/plot_qextend_onnxruntime.html index f53398671..c57cccbb6 100644 --- a/auto_tutorial/plot_qextend_onnxruntime.html +++ b/auto_tutorial/plot_qextend_onnxruntime.html @@ -1,492 +1,367 @@ - - - - - - - - - Fast runtime with onnxruntime — sklearn-onnx 1.11.2 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - - - - - - - -
- - -
- - - -
- -
- -
- - -
- - - - -
- -
- - -
-

Fast runtime with onnxruntime#

-

ONNX operators does not contain operator -from numpy. There is no operator for -solve but this one -is needed to implement the prediction function -of model NMF. The converter can be written -including a new ONNX operator but then it requires a -runtime for it to be tested. Example -Fast design with a python runtime shows how to do that -with mlprodict. Doing the same with -onnxruntime is more ambitious as it requires -C++…

-

to be continued

-

Total running time of the script: ( 0 minutes 0.000 seconds)

- -

Gallery generated by Sphinx-Gallery

-
- - -
- - - - - -
- -
-
- - - - - - -
-
- + + + + + + + + + Fast runtime with onnxruntime - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +

Fast runtime with onnxruntime#

+

The ONNX operator set does not contain all operators from numpy. There is no operator for solve, yet it is needed to implement the prediction function of model NMF. The converter can be written with a new ONNX operator, but then a runtime is required to test it. Example Fast design with a python runtime shows how to do that with mlprodict. Doing the same with onnxruntime is more ambitious as it requires C++…

+

to be continued

+

Total running time of the script: ( 0 minutes 0.000 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/auto_tutorial/plot_transformer_discrepancy.html b/auto_tutorial/plot_transformer_discrepancy.html new file mode 100644 index 000000000..1b5c37057 --- /dev/null +++ b/auto_tutorial/plot_transformer_discrepancy.html @@ -0,0 +1,489 @@ + + + + + + + + + Dealing with discrepancies (tf-idf) - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +

Dealing with discrepancies (tf-idf)#

+

TfidfVectorizer is one transformer for which the converted onnx model may produce different results. The larger the vocabulary, the higher the probability of getting different results. This example proposes an equivalent model with no discrepancies.

+
+

Imports, setups#

+

All imports. It also registers onnx converters for :epkg:`xgboost` and lightgbm.

+
import pprint
+import numpy
+from sklearn.pipeline import Pipeline
+from sklearn.compose import ColumnTransformer
+from sklearn.feature_extraction.text import TfidfVectorizer
+from onnxruntime import InferenceSession
+from skl2onnx import to_onnx
+
+
+def print_sparse_matrix(m):
+    nonan = numpy.nan_to_num(m)
+    mi, ma = nonan.min(), nonan.max()
+    if mi == ma:
+        ma += 1
+    mat = numpy.empty(m.shape, dtype=numpy.str_)
+    mat[:, :] = '.'
+    if hasattr(m, 'todense'):
+        dense = m.todense()
+    else:
+        dense = m
+    for i in range(m.shape[0]):
+        for j in range(m.shape[1]):
+            if dense[i, j] > 0:
+                c = int((dense[i, j] - mi) / (ma - mi) * 25)
+                mat[i, j] = chr(ord('A') + c)
+    return '\n'.join(''.join(line) for line in mat)
+
+
+def diff(a, b):
+    if a.shape != b.shape:
+        raise ValueError(
+            f"Cannot compare matrices with different shapes "
+            f"{a.shape} != {b.shape}.")
+    d = numpy.abs(a - b).sum() / a.size
+    return d
+
+
+
+
+

Artificial datasets#

+

A single artificial text column.

+
strings = numpy.array([
+    "This a sentence.",
+    "This a sentence with more characters $^*&'(-...",
+    """var = ClassName(var2, user=mail@anywhere.com, pwd"""
+    """=")_~-('&]@^\\`|[{#")""",
+    "c79857654",
+    "https://complex-url.com/;76543u3456?g=hhh&amp;h=23",
+    "01-03-05T11:12:13",
+    "https://complex-url.com/;dd76543u3456?g=ddhhh&amp;h=23",
+]).reshape((-1, 1))
+
+pprint.pprint(strings)
+
+
+
array([['This a sentence.'],
+       ["This a sentence with more characters $^*&'(-..."],
+       ['var = ClassName(var2, user=mail@anywhere.com, pwd=")_~-(\'&]@^\\`|[{#")'],
+       ['c79857654'],
+       ['https://complex-url.com/;76543u3456?g=hhh&amp;h=23'],
+       ['01-03-05T11:12:13'],
+       ['https://complex-url.com/;dd76543u3456?g=ddhhh&amp;h=23']],
+      dtype='<U69')
+
+
+
+
+

Fit a TfIdfVectorizer#

+
tfidf = Pipeline([
+    ('pre', ColumnTransformer([
+        ('tfidf', TfidfVectorizer(), 0)
+    ]))
+])
+
+
+

We leave a couple of strings out of the training set.

+
tfidf.fit(strings[:-2])
+tr = tfidf.transform(strings)
+tfidf_step = tfidf.steps[0][1].transformers_[0][1]
+# print(f"output columns: {tfidf_step.get_feature_names_out()}")
+print("rendered outputs")
+print(print_sparse_matrix(tr))
+
+
+
rendered outputs
+..............RR.....
+.....M......M.JJ....M
+...J..JH...J.J...JJJ.
+....Z................
+JJJ....HJJJ.....J....
+.....................
+K.K....IK.K.....K....
+
+
+
+
+

Conversion to ONNX#

+
onx = to_onnx(tfidf, strings)
+
+
+
+
+

Execution with ONNX#

+
sess = InferenceSession(onx.SerializeToString())
+got = sess.run(None, {'X': strings})[0]
+print(f"differences={diff(tr, got):g}")
+print(print_sparse_matrix(got))
+
+
+
differences=3.25823e-08
+..............RR.....
+.....M......M.JJ....M
+...J..JH...J.J...JJJ.
+....Z................
+JJJ....HJJJ.....J....
+.....................
+K.K....IK.K.....K....
+
+
+

Total running time of the script: ( 0 minutes 0.030 seconds)

+ +

Gallery generated by Sphinx-Gallery

+
+
+ +
+
+
+ + +
+ + + + + \ No newline at end of file diff --git a/auto_tutorial/plot_usparse_xgboost.html b/auto_tutorial/plot_usparse_xgboost.html index d2f2f0927..40cb37113 100644 --- a/auto_tutorial/plot_usparse_xgboost.html +++ b/auto_tutorial/plot_usparse_xgboost.html @@ -1,1036 +1,885 @@ - - - - - - - - - TfIdf and sparse matrices — sklearn-onnx 1.11.2 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - - - - - - - - - - - - -
- -
- - -
-

TfIdf and sparse matrices#

-

TfidfVectorizer usually creates sparse data. If the data is sparse enough, matrices usually stay sparse all along the pipeline until the predictor is trained. Sparse matrices do not store null or missing values since those are absent from the datasets. Because some predictors treat them differently, this ambiguity may introduce discrepancies when converting into ONNX. This example looks into several configurations.

- -
-

Imports, setups#

-

All imports. It also registers onnx converters for :epkg:`xgboost` and lightgbm.

-
import warnings
-import numpy
-import pandas
-import onnxruntime as rt
-from tqdm import tqdm
-from sklearn.compose import ColumnTransformer
-from sklearn.datasets import load_iris
-from sklearn.pipeline import Pipeline
-from sklearn.preprocessing import StandardScaler
-from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
-from sklearn.ensemble import RandomForestClassifier
-try:
-    from sklearn.ensemble import HistGradientBoostingClassifier
-except ImportError:
-    HistGradientBoostingClassifier = None
-from xgboost import XGBClassifier
-from lightgbm import LGBMClassifier
-from skl2onnx.common.data_types import FloatTensorType, StringTensorType
-from skl2onnx import to_onnx, update_registered_converter
-from skl2onnx.sklapi import CastTransformer, ReplaceTransformer
-from skl2onnx.common.shape_calculator import (
-    calculate_linear_classifier_output_shapes)
-from onnxmltools.convert.xgboost.operator_converters.XGBoost import (
-    convert_xgboost)
-from onnxmltools.convert.lightgbm.operator_converters.LightGbm import (
-    convert_lightgbm)
-
-
-update_registered_converter(
-    XGBClassifier, 'XGBoostXGBClassifier',
-    calculate_linear_classifier_output_shapes, convert_xgboost,
-    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})
-update_registered_converter(
-    LGBMClassifier, 'LightGbmLGBMClassifier',
-    calculate_linear_classifier_output_shapes, convert_lightgbm,
-    options={'nocl': [True, False], 'zipmap': [True, False]})
-
-
-
-
-

Artificial datasets#

-

Iris + a text column.

-
cst = ['class zero', 'class one', 'class two']
-
-data = load_iris()
-X = data.data[:, :2]
-y = data.target
-
-df = pandas.DataFrame(X)
-df["text"] = [cst[i] for i in y]
-
-
-ind = numpy.arange(X.shape[0])
-numpy.random.shuffle(ind)
-X = X[ind, :].copy()
-y = y[ind].copy()
-
-
-
-
-

Train ensemble after sparse#

-

The example uses the Iris dataset with an artificial text column preprocessed with a tf-idf. sparse_threshold=1. avoids converting sparse matrices into dense ones, as the short sketch below recalls.
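As a reminder of what the parameter controls, a small standalone sketch (made-up data): with sparse_threshold=1. the stacked output stays sparse as long as its density is below 1, while sparse_threshold=0. always produces a dense array.

import numpy
from scipy.sparse import issparse
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import CountVectorizer

corpus = numpy.array(["class zero", "class one", "class two"]).reshape((-1, 1))
for threshold in [1., 0.]:
    union = ColumnTransformer([('count', CountVectorizer(), 0)],
                              sparse_threshold=threshold)
    out = union.fit_transform(corpus)
    print(threshold, issparse(out))   # expected: True for 1., False for 0.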

-
def make_pipelines(df_train, y_train, models=None,
-                   sparse_threshold=1., replace_nan=False,
-                   insert_replace=False):
-
-    if models is None:
-        models = [
-            RandomForestClassifier, HistGradientBoostingClassifier,
-            XGBClassifier, LGBMClassifier]
-    models = [_ for _ in models if _ is not None]
-
-    pipes = []
-    for model in tqdm(models):
-
-        if model == HistGradientBoostingClassifier:
-            kwargs = dict(max_iter=5)
-        elif model == XGBClassifier:
-            kwargs = dict(n_estimators=5, use_label_encoder=False)
-        else:
-            kwargs = dict(n_estimators=5)
-
-        if insert_replace:
-            pipe = Pipeline([
-                ('union', ColumnTransformer([
-                    ('scale1', StandardScaler(), [0, 1]),
-                    ('subject',
-                     Pipeline([
-                         ('count', CountVectorizer()),
-                         ('tfidf', TfidfTransformer()),
-                         ('repl', ReplaceTransformer()),
-                     ]), "text"),
-                ], sparse_threshold=sparse_threshold)),
-                ('cast', CastTransformer()),
-                ('cls', model(max_depth=3, **kwargs)),
-            ])
-        else:
-            pipe = Pipeline([
-                ('union', ColumnTransformer([
-                    ('scale1', StandardScaler(), [0, 1]),
-                    ('subject',
-                     Pipeline([
-                         ('count', CountVectorizer()),
-                         ('tfidf', TfidfTransformer())
-                     ]), "text"),
-                ], sparse_threshold=sparse_threshold)),
-                ('cast', CastTransformer()),
-                ('cls', model(max_depth=3, **kwargs)),
-            ])
-
-        try:
-            pipe.fit(df_train, y_train)
-        except TypeError as e:
-            obs = dict(model=model.__name__, pipe=pipe, error=e)
-            pipes.append(obs)
-            continue
-
-        options = {model: {'zipmap': False}}
-        if replace_nan:
-            options[TfidfTransformer] = {'nan': True}
-
-        # convert
-        with warnings.catch_warnings(record=False):
-            warnings.simplefilter("ignore", (FutureWarning, UserWarning))
-            model_onnx = to_onnx(
-                pipe,
-                initial_types=[('input', FloatTensorType([None, 2])),
-                               ('text', StringTensorType([None, 1]))],
-                target_opset={'': 12, 'ai.onnx.ml': 2},
-                options=options)
-
-        with open('model.onnx', 'wb') as f:
-            f.write(model_onnx.SerializeToString())
-
-        sess = rt.InferenceSession(model_onnx.SerializeToString())
-        inputs = {"input": df[[0, 1]].values.astype(numpy.float32),
-                  "text": df[["text"]].values}
-        pred_onx = sess.run(None, inputs)
-
-        diff = numpy.abs(
-            pred_onx[1].ravel() -
-            pipe.predict_proba(df).ravel()).sum()
-
-        obs = dict(model=model.__name__,
-                   discrepencies=diff,
-                   model_onnx=model_onnx, pipe=pipe)
-        pipes.append(obs)
-
-    return pipes
-
-
-data_sparse = make_pipelines(df, y)
-stat = pandas.DataFrame(data_sparse).drop(['model_onnx', 'pipe'], axis=1)
-if 'error' in stat.columns:
-    print(stat.drop('error', axis=1))
-stat
-
-
-

Out:

-
  0%|                                                            | 0/4 [00:00<?, ?it/s][14:44:16] WARNING: C:/Users/Administrator/workspace/xgboost-win64_release_1.5.1/src/learner.cc:1115: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'multi:softprob' was changed from 'merror' to 'mlogloss'. Explicitly set eval_metric if you'd like to restore the old behavior.
-
- 75%|#######################################             | 3/4 [00:00<00:00, 20.27it/s]
-100%|####################################################| 4/4 [00:00<00:00, 19.05it/s]
-                            model  discrepencies
-0          RandomForestClassifier       0.000006
-1  HistGradientBoostingClassifier            NaN
-2                   XGBClassifier      22.189188
-3                  LGBMClassifier       0.000007
-
-
-
-
modeldiscrepencieserror
0RandomForestClassifier0.000006NaN
1HistGradientBoostingClassifierNaNA sparse matrix was passed, but dense data is ...
2XGBClassifier22.189188NaN
3LGBMClassifier0.000007NaN
-
-
-
-

Sparse data hurts.

-
-
-

Dense data#

-

Let’s replace sparse data with dense by using sparse_threshold=0.

-
data_dense = make_pipelines(df, y, sparse_threshold=0.)
-stat = pandas.DataFrame(data_dense).drop(['model_onnx', 'pipe'], axis=1)
-if 'error' in stat.columns:
-    print(stat.drop('error', axis=1))
-stat
-
-
-

Out:

-
  0%|                                                            | 0/4 [00:00<?, ?it/s]
- 50%|##########################                          | 2/4 [00:00<00:00, 13.25it/s][14:44:16] WARNING: C:/Users/Administrator/workspace/xgboost-win64_release_1.5.1/src/learner.cc:1115: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'multi:softprob' was changed from 'merror' to 'mlogloss'. Explicitly set eval_metric if you'd like to restore the old behavior.
-
-100%|####################################################| 4/4 [00:00<00:00, 14.30it/s]
-100%|####################################################| 4/4 [00:00<00:00, 14.08it/s]
-
-
-
-
modeldiscrepencies
0RandomForestClassifier0.000004
1HistGradientBoostingClassifier0.000005
2XGBClassifier0.000004
3LGBMClassifier0.000007
-
-
-
-

This is much better. Let’s compare how the preprocessing -applies on the data.

-
print("sparse")
-print(data_sparse[-1]['pipe'].steps[0][-1].transform(df)[:2])
-print()
-print("dense")
-print(data_dense[-1]['pipe'].steps[0][-1].transform(df)[:2])
-
-
-

Out:

-
sparse
-  (0, 0)        -0.9006811702978088
-  (0, 1)        1.019004351971607
-  (0, 2)        0.4323732931220851
-  (0, 5)        0.9016947018779491
-  (1, 0)        -1.1430169111851105
-  (1, 1)        -0.13197947932162468
-  (1, 2)        0.4323732931220851
-  (1, 5)        0.9016947018779491
-
-dense
-[[-0.90068117  1.01900435  0.43237329  0.          0.          0.9016947 ]
- [-1.14301691 -0.13197948  0.43237329  0.          0.          0.9016947 ]]
-
-
-

This shows RandomForestClassifier, -XGBClassifier do not process -the same way sparse and -dense matrix as opposed to LGBMClassifier. -And HistGradientBoostingClassifier -fails.

-
-
-

Dense data with nan#

-

Let’s keep sparse data in the scikit-learn pipeline but -replace null values by nan in the onnx graph.

-
data_dense = make_pipelines(df, y, sparse_threshold=1., replace_nan=True)
-stat = pandas.DataFrame(data_dense).drop(['model_onnx', 'pipe'], axis=1)
-if 'error' in stat.columns:
-    print(stat.drop('error', axis=1))
-stat
-
-
-

Out:

-
  0%|                                                            | 0/4 [00:00<?, ?it/s][14:44:16] WARNING: C:/Users/Administrator/workspace/xgboost-win64_release_1.5.1/src/learner.cc:1115: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'multi:softprob' was changed from 'merror' to 'mlogloss'. Explicitly set eval_metric if you'd like to restore the old behavior.
-
- 75%|#######################################             | 3/4 [00:00<00:00, 21.65it/s]
-100%|####################################################| 4/4 [00:00<00:00, 19.90it/s]
-                            model  discrepencies
-0          RandomForestClassifier      43.120465
-1  HistGradientBoostingClassifier            NaN
-2                   XGBClassifier       0.000004
-3                  LGBMClassifier       0.000007
-
-
-
-
modeldiscrepencieserror
0RandomForestClassifier43.120465NaN
1HistGradientBoostingClassifierNaNA sparse matrix was passed, but dense data is ...
2XGBClassifier0.000004NaN
3LGBMClassifier0.000007NaN
-
-
-
-
-
-

Dense, 0 replaced by nan#

-

Instead of using a specific options to replace null values -into nan values, a custom transformer called -ReplaceTransformer is explicitely inserted into the pipeline. -A new converter is added to the list of supported models. -It is equivalent to the previous options except it is -more explicit.

-
data_dense = make_pipelines(df, y, sparse_threshold=1., replace_nan=False,
-                            insert_replace=True)
-stat = pandas.DataFrame(data_dense).drop(['model_onnx', 'pipe'], axis=1)
-if 'error' in stat.columns:
-    print(stat.drop('error', axis=1))
-stat
-
-
-

Out:

-
  0%|                                                            | 0/4 [00:00<?, ?it/s][14:44:16] WARNING: C:/Users/Administrator/workspace/xgboost-win64_release_1.5.1/src/learner.cc:1115: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'multi:softprob' was changed from 'merror' to 'mlogloss'. Explicitly set eval_metric if you'd like to restore the old behavior.
-
- 75%|#######################################             | 3/4 [00:00<00:00, 21.74it/s]
-100%|####################################################| 4/4 [00:00<00:00, 19.80it/s]
-                            model  discrepencies
-0          RandomForestClassifier      22.411109
-1  HistGradientBoostingClassifier            NaN
-2                   XGBClassifier       0.000004
-3                  LGBMClassifier       0.000007
-
-
-
-
modeldiscrepencieserror
0RandomForestClassifier22.411109NaN
1HistGradientBoostingClassifierNaNA sparse matrix was passed, but dense data is ...
2XGBClassifier0.000004NaN
3LGBMClassifier0.000007NaN
-
-
-
-
-
-

Conclusion#

-

Unless dense arrays are used, because onnxruntime -ONNX does not support sparse yet, the conversion needs to be -tuned depending on the model which follows the TfIdf preprocessing.

-

Total running time of the script: ( 0 minutes 0.950 seconds)

- -

Gallery generated by Sphinx-Gallery


TfIdf and sparse matrices#

+

TfidfVectorizer usually creates sparse data. If the data is sparse enough, matrices usually stay sparse all along the pipeline until the predictor is trained. Sparse matrices do not store null or missing values since those entries are simply absent from the dataset. Because some predictors treat an absent entry differently from an explicit zero, this ambiguity may introduce discrepancies when the pipeline is converted into ONNX. This example looks into several configurations.
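To make the ambiguity concrete, here is a short editor's sketch (not part of the original example; the toy corpus is made up): a tf-idf output is a scipy sparse matrix, and densifying it turns absent entries into explicit zeros.

from sklearn.feature_extraction.text import TfidfVectorizer

corpus = ["class zero", "class one", "class two"]
tfidf = TfidfVectorizer().fit(corpus)
out = tfidf.transform(corpus)
print(type(out))      # a scipy sparse matrix: absent entries are simply not stored
print(out.toarray())  # once densified, the absent entries become explicit zeros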

+
+

Imports, setups#

+

All imports. It also registers the onnx converters for xgboost and lightgbm.

+
import warnings
+import numpy
+import pandas
+import onnxruntime as rt
+from tqdm import tqdm
+from sklearn.compose import ColumnTransformer
+from sklearn.datasets import load_iris
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import StandardScaler
+from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
+from sklearn.ensemble import RandomForestClassifier
+try:
+    from sklearn.ensemble import HistGradientBoostingClassifier
+except ImportError:
+    HistGradientBoostingClassifier = None
+from xgboost import XGBClassifier
+from lightgbm import LGBMClassifier
+from skl2onnx.common.data_types import FloatTensorType, StringTensorType
+from skl2onnx import to_onnx, update_registered_converter
+from skl2onnx.sklapi import CastTransformer, ReplaceTransformer
+from skl2onnx.common.shape_calculator import (
+    calculate_linear_classifier_output_shapes)
+from onnxmltools.convert.xgboost.operator_converters.XGBoost import (
+    convert_xgboost)
+from onnxmltools.convert.lightgbm.operator_converters.LightGbm import (
+    convert_lightgbm)
+
+
+update_registered_converter(
+    XGBClassifier, 'XGBoostXGBClassifier',
+    calculate_linear_classifier_output_shapes, convert_xgboost,
+    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})
+update_registered_converter(
+    LGBMClassifier, 'LightGbmLGBMClassifier',
+    calculate_linear_classifier_output_shapes, convert_lightgbm,
+    options={'nocl': [True, False], 'zipmap': [True, False]})
+
+
+
+
+

Artificial datasets#

+

Iris + a text column.

+
cst = ['class zero', 'class one', 'class two']
+
+data = load_iris()
+X = data.data[:, :2]
+y = data.target
+
+df = pandas.DataFrame(X)
+df.columns = [f"c{c}" for c in df.columns]
+df["text"] = [cst[i] for i in y]
+
+
+ind = numpy.arange(X.shape[0])
+numpy.random.shuffle(ind)
+X = X[ind, :].copy()
+y = y[ind].copy()
+
+
+
+
+

Train ensemble after sparse#

+

The example uses the Iris dataset with an artificial text column preprocessed with a tf-idf. sparse_threshold=1. prevents the sparse matrices from being converted into dense matrices.

+
def make_pipelines(df_train, y_train, models=None,
+                   sparse_threshold=1., replace_nan=False,
+                   insert_replace=False):
+
+    if models is None:
+        models = [
+            RandomForestClassifier, HistGradientBoostingClassifier,
+            XGBClassifier, LGBMClassifier]
+    models = [_ for _ in models if _ is not None]
+
+    pipes = []
+    for model in tqdm(models):
+
+        if model == HistGradientBoostingClassifier:
+            kwargs = dict(max_iter=5)
+        elif model == XGBClassifier:
+            kwargs = dict(n_estimators=5, use_label_encoder=False)
+        else:
+            kwargs = dict(n_estimators=5)
+
+        if insert_replace:
+            pipe = Pipeline([
+                ('union', ColumnTransformer([
+                    ('scale1', StandardScaler(), [0, 1]),
+                    ('subject',
+                     Pipeline([
+                         ('count', CountVectorizer()),
+                         ('tfidf', TfidfTransformer()),
+                         ('repl', ReplaceTransformer()),
+                     ]), "text"),
+                ], sparse_threshold=sparse_threshold)),
+                ('cast', CastTransformer()),
+                ('cls', model(max_depth=3, **kwargs)),
+            ])
+        else:
+            pipe = Pipeline([
+                ('union', ColumnTransformer([
+                    ('scale1', StandardScaler(), [0, 1]),
+                    ('subject',
+                     Pipeline([
+                         ('count', CountVectorizer()),
+                         ('tfidf', TfidfTransformer())
+                     ]), "text"),
+                ], sparse_threshold=sparse_threshold)),
+                ('cast', CastTransformer()),
+                ('cls', model(max_depth=3, **kwargs)),
+            ])
+
+        try:
+            pipe.fit(df_train, y_train)
+        except TypeError as e:
+            obs = dict(model=model.__name__, pipe=pipe, error=e,
+                       model_onnx=None)
+            pipes.append(obs)
+            continue
+
+        options = {model: {'zipmap': False}}
+        if replace_nan:
+            options[TfidfTransformer] = {'nan': True}
+
+        # convert
+        with warnings.catch_warnings(record=False):
+            warnings.simplefilter("ignore", (FutureWarning, UserWarning))
+            model_onnx = to_onnx(
+                pipe,
+                initial_types=[('input', FloatTensorType([None, 2])),
+                               ('text', StringTensorType([None, 1]))],
+                target_opset={'': 12, 'ai.onnx.ml': 2},
+                options=options)
+
+        with open('model.onnx', 'wb') as f:
+            f.write(model_onnx.SerializeToString())
+
+        sess = rt.InferenceSession(model_onnx.SerializeToString())
+        inputs = {"input": df[["c0", "c1"]].values.astype(numpy.float32),
+                  "text": df[["text"]].values}
+        pred_onx = sess.run(None, inputs)
+
+        diff = numpy.abs(
+            pred_onx[1].ravel() -
+            pipe.predict_proba(df).ravel()).sum()
+
+        obs = dict(model=model.__name__,
+                   discrepencies=diff,
+                   model_onnx=model_onnx, pipe=pipe)
+        pipes.append(obs)
+
+    return pipes
+
+
+data_sparse = make_pipelines(df, y)
+stat = pandas.DataFrame(data_sparse).drop(['model_onnx', 'pipe'], axis=1)
+if 'error' in stat.columns:
+    print(stat.drop('error', axis=1))
+stat
+
+
+
  0%|          | 0/4 [00:00<?, ?it/s]/home/xadupre/.local/lib/python3.10/site-packages/xgboost/sklearn.py:1395: UserWarning: `use_label_encoder` is deprecated in 1.7.0.
+  warnings.warn("`use_label_encoder` is deprecated in 1.7.0.")
+
+100%|##########| 4/4 [00:00<00:00, 28.56it/s]
+100%|##########| 4/4 [00:00<00:00, 28.49it/s]
+                            model  discrepencies
+0          RandomForestClassifier       0.000004
+1  HistGradientBoostingClassifier            NaN
+2                   XGBClassifier       5.278442
+3                  LGBMClassifier       0.000009
+
+
+
+
                            model  discrepencies                                               error
0          RandomForestClassifier       0.000004                                                 NaN
1  HistGradientBoostingClassifier            NaN  A sparse matrix was passed, but dense data is ...
2                   XGBClassifier       5.278442                                                 NaN
3                  LGBMClassifier       0.000009                                                 NaN
+
+
+
+

Sparse data hurts.
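A quick way to confirm that the fitted ColumnTransformer really keeps its output sparse here is sketched below (an editor's addition; it reuses the data_sparse and df objects created above).

import scipy.sparse

union = data_sparse[-1]['pipe'].steps[0][-1]
print(scipy.sparse.issparse(union.transform(df)))  # expected True with sparse_threshold=1.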

+
+
+

Dense data#

+

Let’s replace the sparse data with dense data by using sparse_threshold=0.

+
data_dense = make_pipelines(df, y, sparse_threshold=0.)
+stat = pandas.DataFrame(data_dense).drop(['model_onnx', 'pipe'], axis=1)
+if 'error' in stat.columns:
+    print(stat.drop('error', axis=1))
+stat
+
+
+
  0%|          | 0/4 [00:00<?, ?it/s]
+ 50%|#####     | 2/4 [00:00<00:00, 10.43it/s]/home/xadupre/.local/lib/python3.10/site-packages/xgboost/sklearn.py:1395: UserWarning: `use_label_encoder` is deprecated in 1.7.0.
+  warnings.warn("`use_label_encoder` is deprecated in 1.7.0.")
+
+100%|##########| 4/4 [00:00<00:00, 14.43it/s]
+
+
+
+
                            model  discrepencies
0          RandomForestClassifier       0.000005
1  HistGradientBoostingClassifier       0.000005
2                   XGBClassifier       0.000006
3                  LGBMClassifier       0.000009
+
+
+
+

This is much better. Let’s compare how the preprocessing applies to the data.

+
print("sparse")
+print(data_sparse[-1]['pipe'].steps[0][-1].transform(df)[:2])
+print()
+print("dense")
+print(data_dense[-1]['pipe'].steps[0][-1].transform(df)[:2])
+
+
+
sparse
+  (0, 0)        -0.9006811702978088
+  (0, 1)        1.019004351971607
+  (0, 2)        0.4323732931220851
+  (0, 5)        0.9016947018779491
+  (1, 0)        -1.1430169111851105
+  (1, 1)        -0.13197947932162468
+  (1, 2)        0.4323732931220851
+  (1, 5)        0.9016947018779491
+
+dense
+[[-0.90068117  1.01900435  0.43237329  0.          0.          0.9016947 ]
+ [-1.14301691 -0.13197948  0.43237329  0.          0.          0.9016947 ]]
+
+
+

This shows that RandomForestClassifier and XGBClassifier do not process sparse and dense matrices the same way, as opposed to LGBMClassifier. And HistGradientBoostingClassifier fails on sparse input.
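The difference can also be observed inside scikit-learn itself. The sketch below is an editor's addition; it reuses data_sparse and df from above and assumes data_sparse[2] holds the fitted XGBClassifier pipeline. It feeds the classifier the same features once as a sparse matrix and once densified; the probabilities may not match because xgboost treats absent sparse entries as missing values but explicit zeros as regular values.

import scipy.sparse

pipe = data_sparse[2]['pipe']              # the XGBClassifier pipeline
feats = pipe.steps[0][-1].transform(df)    # sparse output of the ColumnTransformer
dense = scipy.sparse.csr_matrix(feats).toarray()
cls = pipe.steps[-1][-1]
print(cls.predict_proba(feats)[:2])
print(cls.predict_proba(dense)[:2])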

+
+
+

Dense data with nan#

+

Let’s keep the sparse data in the scikit-learn pipeline but replace the null values with nan in the ONNX graph.

+
data_dense = make_pipelines(df, y, sparse_threshold=1., replace_nan=True)
+stat = pandas.DataFrame(data_dense).drop(['model_onnx', 'pipe'], axis=1)
+if 'error' in stat.columns:
+    print(stat.drop('error', axis=1))
+stat
+
+
+
  0%|          | 0/4 [00:00<?, ?it/s]/home/xadupre/.local/lib/python3.10/site-packages/xgboost/sklearn.py:1395: UserWarning: `use_label_encoder` is deprecated in 1.7.0.
+  warnings.warn("`use_label_encoder` is deprecated in 1.7.0.")
+
+ 75%|#######5  | 3/4 [00:00<00:00, 24.84it/s]
+100%|##########| 4/4 [00:00<00:00, 22.20it/s]
+                            model  discrepencies
+0          RandomForestClassifier      35.336951
+1  HistGradientBoostingClassifier            NaN
+2                   XGBClassifier       0.000006
+3                  LGBMClassifier       0.000009
+
+
+
+
                            model  discrepencies                                               error
0          RandomForestClassifier      35.336951                                                 NaN
1  HistGradientBoostingClassifier            NaN  A sparse matrix was passed, but dense data is ...
2                   XGBClassifier       0.000006                                                 NaN
3                  LGBMClassifier       0.000009                                                 NaN
+
+
+
+
+
+

Dense, 0 replaced by nan#

+

Instead of using a specific option to replace null values with nan values, a custom transformer called ReplaceTransformer is explicitly inserted into the pipeline. A new converter is added to the list of supported models. It is equivalent to the previous option, only more explicit.

+
data_dense = make_pipelines(df, y, sparse_threshold=1., replace_nan=False,
+                            insert_replace=True)
+stat = pandas.DataFrame(data_dense).drop(['model_onnx', 'pipe'], axis=1)
+if 'error' in stat.columns:
+    print(stat.drop('error', axis=1))
+stat
+
+
+
  0%|          | 0/4 [00:00<?, ?it/s]/home/xadupre/.local/lib/python3.10/site-packages/xgboost/sklearn.py:1395: UserWarning: `use_label_encoder` is deprecated in 1.7.0.
+  warnings.warn("`use_label_encoder` is deprecated in 1.7.0.")
+
+ 75%|#######5  | 3/4 [00:00<00:00, 25.50it/s]
+100%|##########| 4/4 [00:00<00:00, 24.67it/s]
+                            model  discrepencies
+0          RandomForestClassifier      34.057222
+1  HistGradientBoostingClassifier            NaN
+2                   XGBClassifier       0.000006
+3                  LGBMClassifier       0.000009
+
+
+
+
                            model  discrepencies                                               error
0          RandomForestClassifier      34.057222                                                 NaN
1  HistGradientBoostingClassifier            NaN  A sparse matrix was passed, but dense data is ...
2                   XGBClassifier       0.000006                                                 NaN
3                  LGBMClassifier       0.000009                                                 NaN
+
+
+
+
+
+

Conclusion#

+

Because onnxruntime and ONNX do not support sparse tensors yet, unless dense arrays are used, the conversion needs to be tuned depending on the model which follows the TfIdf preprocessing.

+

Total running time of the script: ( 0 minutes 0.807 seconds)

+ +

Gallery generated by Sphinx-Gallery

diff --git a/auto_tutorial/plot_wext_pyod_forest.html b/auto_tutorial/plot_wext_pyod_forest.html index 12c524208..e0cba497a 100644 --- a/auto_tutorial/plot_wext_pyod_forest.html +++ b/auto_tutorial/plot_wext_pyod_forest.html @@ -1,685 +1,590 @@

Converter for pyod.models.iforest.IForest#

-

This example answers issues 685. -It implements a custom converter for model pyod.models.iforest.IForest. -This example uses Implement a new converter as a start.

- -
-

Trains a model#

-

All imports. It also registered onnx converters for :epgk:`xgboost` -and lightgbm.

-
import numpy as np
-import pandas as pd
-from onnxruntime import InferenceSession
-from sklearn.preprocessing import MinMaxScaler
-from skl2onnx.proto import onnx_proto
-from skl2onnx.common.data_types import (
-    FloatTensorType, Int64TensorType, guess_numpy_type)
-from skl2onnx import to_onnx, update_registered_converter, get_model_alias
-from skl2onnx.algebra.onnx_ops import (
-    OnnxIdentity, OnnxMul, OnnxLess, OnnxConcat, OnnxCast, OnnxAdd,
-    OnnxClip)
-from skl2onnx.algebra.onnx_operator import OnnxSubEstimator
-try:
-    from pyod.models.iforest import IForest
-except (ValueError, ImportError) as e:
-    print("Unable to import pyod:", e)
-    IForest = None
-
-if IForest is not None:
-    data1 = {'First':  [500, 500, 400, 100, 200, 300, 100],
-             'Second': ['a', 'b', 'a', 'b', 'a', 'b', 'c']}
-
-    df1 = pd.DataFrame(data1, columns=['First', 'Second'])
-    dumdf1 = pd.get_dummies(df1)
-    scaler = MinMaxScaler()
-    scaler.partial_fit(dumdf1)
-    sc_data = scaler.transform(dumdf1)
-    model1 = IForest(n_estimators=10, bootstrap=True, behaviour='new',
-                     contamination=0.1, random_state=np.random.RandomState(42),
-                     verbose=1, n_jobs=-1).fit(sc_data)
-    feature_names2 = dumdf1.columns
-
-    initial_type = [('float_input',
-                     FloatTensorType([None, len(feature_names2)]))]
-
-
-

Out:

-
Unable to import pyod: No module named 'pyod'
-
-
-

We check that the conversion fails as expected.

-
if IForest is not None:
-    try:
-        to_onnx(model1, initial_types=initial_type)
-    except Exception as e:
-        print(e)
-
-
-
-
-

Custom converter#

-

First the parser and the shape calculator. -The parser defines the number of outputs and their type. -The shape calculator defines their dimensions.

-
def pyod_iforest_parser(scope, model, inputs, custom_parsers=None):
-    alias = get_model_alias(type(model))
-    this_operator = scope.declare_local_operator(alias, model)
-
-    # inputs
-    this_operator.inputs.append(inputs[0])
-
-    # outputs
-    cls_type = inputs[0].type.__class__
-    val_y1 = scope.declare_local_variable('label', Int64TensorType())
-    val_y2 = scope.declare_local_variable('probability', cls_type())
-    this_operator.outputs.append(val_y1)
-    this_operator.outputs.append(val_y2)
-
-    # end
-    return this_operator.outputs
-
-
-def pyod_iforest_shape_calculator(operator):
-    N = operator.inputs[0].get_first_dimension()
-    operator.outputs[0].type.shape = [N, 1]
-    operator.outputs[1].type.shape = [N, 2]
-
-
-

Then the converter.

-
def pyod_iforest_converter(scope, operator, container):
-    op = operator.raw_operator
-    opv = container.target_opset
-    out = operator.outputs
-
-    # We retrieve the unique input.
-    X = operator.inputs[0]
-
-    # In most case, computation happen in floats.
-    # But it might be with double. ONNX is very strict
-    # about types, every constant should have the same
-    # type as the input.
-    dtype = guess_numpy_type(X.type)
-
-    detector = op.detector_  # Should be IForest from scikit-learn.
-    lab_pred = OnnxSubEstimator(detector, X, op_version=opv)
-    scores = OnnxIdentity(lab_pred[1], op_version=opv)
-
-    # labels
-    threshold = op.threshold_
-    above = OnnxLess(scores, np.array([threshold], dtype=dtype),
-                     op_version=opv)
-    labels = OnnxCast(above, op_version=opv, to=onnx_proto.TensorProto.INT64,
-                      output_names=out[:1])
-
-    # probabilities
-    train_scores = op.decision_scores_
-    scaler = MinMaxScaler().fit(train_scores.reshape(-1, 1))
-    scores_ = OnnxMul(scores, np.array([-1], dtype=dtype),
-                      op_version=opv)
-    print(scaler.min_)
-    print(scaler.scale_)
-
-    scaled = OnnxMul(scores_, scaler.scale_.astype(dtype), op_version=opv)
-    scaled_centered = OnnxAdd(scaled, scaler.min_.astype(dtype),
-                              op_version=opv)
-    clipped = OnnxClip(scaled_centered, np.array([0], dtype=dtype),
-                       np.array([1], dtype=dtype),
-                       op_version=opv)
-    clipped_ = OnnxAdd(
-        OnnxMul(clipped, np.array([-1], dtype=dtype),
-                op_version=opv),
-        np.array([1], dtype=dtype),
-        op_version=opv)
-
-    scores_2d = OnnxConcat(clipped_, clipped, axis=1, op_version=opv,
-                           output_names=out[1:])
-
-    labels.add_to(scope, container)
-    scores_2d.add_to(scope, container)
-
-
-

Finally the registration.

-
if IForest is not None:
-    update_registered_converter(
-        IForest, "PyodIForest",
-        pyod_iforest_shape_calculator,
-        pyod_iforest_converter,
-        parser=pyod_iforest_parser)
-
-
-

And the conversion.

-
if IForest is not None:
-    onx = to_onnx(model1, initial_types=initial_type,
-                  target_opset={'': 14, 'ai.onnx.ml': 2})
-
-
-
-
-

Checking discrepencies#

-
if IForest is not None:
-    data = sc_data.astype(np.float32)
-
-    expected_labels = model1.predict(data)
-    expected_proba = model1.predict_proba(data)
-
-    sess = InferenceSession(onx.SerializeToString())
-    res = sess.run(None, {'float_input': data})
-
-    onx_labels = res[0]
-    onx_proba = res[1]
-
-    diff_labels = np.abs(onx_labels.ravel() - expected_labels.ravel()).max()
-    diff_proba = np.abs(onx_proba.ravel() - expected_proba.ravel()).max()
-
-    print("dicrepencies:", diff_labels, diff_proba)
-
-    print("ONNX labels", onx_labels)
-    print("ONNX probabilities", onx_proba)
-
-
-

Total running time of the script: ( 0 minutes 0.006 seconds)

- -

Gallery generated by Sphinx-Gallery


Converter for pyod.models.iforest.IForest#

+

This example answers issue 685. It implements a custom converter for the model pyod.models.iforest.IForest. It uses Implement a new converter as a starting point.

+
+

Trains a model#

+

All imports. The example then trains a small pyod.models.iforest.IForest model on a toy dataset.

+
import numpy as np
+import pandas as pd
+from onnxruntime import InferenceSession
+from sklearn.preprocessing import MinMaxScaler
+from skl2onnx.proto import onnx_proto
+from skl2onnx.common.data_types import (
+    FloatTensorType, Int64TensorType, guess_numpy_type)
+from skl2onnx import to_onnx, update_registered_converter, get_model_alias
+from skl2onnx.algebra.onnx_ops import (
+    OnnxIdentity, OnnxMul, OnnxLess, OnnxConcat, OnnxCast, OnnxAdd,
+    OnnxClip)
+from skl2onnx.algebra.onnx_operator import OnnxSubEstimator
+try:
+    from pyod.models.iforest import IForest
+except (ValueError, ImportError) as e:
+    print("Unable to import pyod:", e)
+    IForest = None
+
+if IForest is not None:
+    data1 = {'First': [500, 500, 400, 100, 200, 300, 100],
+             'Second': ['a', 'b', 'a', 'b', 'a', 'b', 'c']}
+
+    df1 = pd.DataFrame(data1, columns=['First', 'Second'])
+    dumdf1 = pd.get_dummies(df1)
+    scaler = MinMaxScaler()
+    scaler.partial_fit(dumdf1)
+    sc_data = scaler.transform(dumdf1)
+    model1 = IForest(n_estimators=10, bootstrap=True, behaviour='new',
+                     contamination=0.1, random_state=np.random.RandomState(42),
+                     verbose=1, n_jobs=-1).fit(sc_data)
+    feature_names2 = dumdf1.columns
+
+    initial_type = [('float_input',
+                     FloatTensorType([None, len(feature_names2)]))]
+
+
+
[Parallel(n_jobs=8)]: Using backend ThreadingBackend with 8 concurrent workers.
+[Parallel(n_jobs=8)]: Done   2 out of   8 | elapsed:    0.0s remaining:    0.0s
+[Parallel(n_jobs=8)]: Done   8 out of   8 | elapsed:    0.0s finished
+
+
+

We check that the conversion fails as expected.

+
if IForest is not None:
+    try:
+        to_onnx(model1, initial_types=initial_type)
+    except Exception as e:
+        print(e)
+
+
+
Unable to find a shape calculator for type '<class 'pyod.models.iforest.IForest'>'.
+It usually means the pipeline being converted contains a
+transformer or a predictor with no corresponding converter
+implemented in sklearn-onnx. If the converted is implemented
+in another library, you need to register
+the converted so that it can be used by sklearn-onnx (function
+update_registered_converter). If the model is not yet covered
+by sklearn-onnx, you may raise an issue to
+https://github.com/onnx/sklearn-onnx/issues
+to get the converter implemented or even contribute to the
+project. If the model is a custom model, a new converter must
+be implemented. Examples can be found in the gallery.
+
+
+
+
+

Custom converter#

+

First the parser and the shape calculator. +The parser defines the number of outputs and their type. +The shape calculator defines their dimensions.

+
def pyod_iforest_parser(scope, model, inputs, custom_parsers=None):
+    alias = get_model_alias(type(model))
+    this_operator = scope.declare_local_operator(alias, model)
+
+    # inputs
+    this_operator.inputs.append(inputs[0])
+
+    # outputs
+    cls_type = inputs[0].type.__class__
+    val_y1 = scope.declare_local_variable('label', Int64TensorType())
+    val_y2 = scope.declare_local_variable('probability', cls_type())
+    this_operator.outputs.append(val_y1)
+    this_operator.outputs.append(val_y2)
+
+    # end
+    return this_operator.outputs
+
+
+def pyod_iforest_shape_calculator(operator):
+    N = operator.inputs[0].get_first_dimension()
+    operator.outputs[0].type.shape = [N, 1]
+    operator.outputs[1].type.shape = [N, 2]
+
+
+

Then the converter.

+
def pyod_iforest_converter(scope, operator, container):
+    op = operator.raw_operator
+    opv = container.target_opset
+    out = operator.outputs
+
+    # We retrieve the unique input.
+    X = operator.inputs[0]
+
+    # In most cases, computation happens in floats,
+    # but it might be in doubles. ONNX is very strict
+    # about types: every constant should have the same
+    # type as the input.
+    dtype = guess_numpy_type(X.type)
+
+    detector = op.detector_  # Should be IForest from scikit-learn.
+    lab_pred = OnnxSubEstimator(detector, X, op_version=opv)
+    scores = OnnxIdentity(lab_pred[1], op_version=opv)
+
+    # labels
+    threshold = op.threshold_
+    above = OnnxLess(scores, np.array([threshold], dtype=dtype),
+                     op_version=opv)
+    labels = OnnxCast(above, op_version=opv, to=onnx_proto.TensorProto.INT64,
+                      output_names=out[:1])
+
+    # probabilities
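+    # (editor's note) The block below appears to mirror pyod's "linear"
+    # probability conversion: the raw scores are negated, min-max scaled with a
+    # scaler fitted on the training decision scores, clipped into [0, 1], and
+    # the two columns [1 - p, p] are concatenated to form the probability output.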
+    train_scores = op.decision_scores_
+    scaler = MinMaxScaler().fit(train_scores.reshape(-1, 1))
+    scores_ = OnnxMul(scores, np.array([-1], dtype=dtype),
+                      op_version=opv)
+    print(scaler.min_)
+    print(scaler.scale_)
+
+    scaled = OnnxMul(scores_, scaler.scale_.astype(dtype), op_version=opv)
+    scaled_centered = OnnxAdd(scaled, scaler.min_.astype(dtype),
+                              op_version=opv)
+    clipped = OnnxClip(scaled_centered, np.array([0], dtype=dtype),
+                       np.array([1], dtype=dtype),
+                       op_version=opv)
+    clipped_ = OnnxAdd(
+        OnnxMul(clipped, np.array([-1], dtype=dtype),
+                op_version=opv),
+        np.array([1], dtype=dtype),
+        op_version=opv)
+
+    scores_2d = OnnxConcat(clipped_, clipped, axis=1, op_version=opv,
+                           output_names=out[1:])
+
+    labels.add_to(scope, container)
+    scores_2d.add_to(scope, container)
+
+
+

Finally the registration.

+
if IForest is not None:
+    update_registered_converter(
+        IForest, "PyodIForest",
+        pyod_iforest_shape_calculator,
+        pyod_iforest_converter,
+        parser=pyod_iforest_parser)
+
+
+

And the conversion.

+
if IForest is not None:
+    onx = to_onnx(model1, initial_types=initial_type,
+                  target_opset={'': 14, 'ai.onnx.ml': 2})
+
+
+
[0.75171798]
+[13.95064645]
+
+
+
+
+

Checking discrepencies#

+
if IForest is not None:
+    data = sc_data.astype(np.float32)
+
+    expected_labels = model1.predict(data)
+    expected_proba = model1.predict_proba(data)
+
+    sess = InferenceSession(onx.SerializeToString())
+    res = sess.run(None, {'float_input': data})
+
+    onx_labels = res[0]
+    onx_proba = res[1]
+
+    diff_labels = np.abs(onx_labels.ravel() - expected_labels.ravel()).max()
+    diff_proba = np.abs(onx_proba.ravel() - expected_proba.ravel()).max()
+
+    print("dicrepencies:", diff_labels, diff_proba)
+
+    print("ONNX labels", onx_labels)
+    print("ONNX probabilities", onx_proba)
+
+
+
dicrepencies: 0 8.684300415451318e-07
+ONNX labels [[0]
+ [0]
+ [0]
+ [0]
+ [0]
+ [0]
+ [1]]
+ONNX probabilities [[1.         0.        ]
+ [0.809063   0.19093698]
+ [1.         0.        ]
+ [0.41380423 0.58619577]
+ [0.61369824 0.38630173]
+ [0.809063   0.19093698]
+ [0.         1.        ]]
+
+
+

Total running time of the script: ( 0 minutes 0.502 seconds)

+ +

Gallery generated by Sphinx-Gallery

diff --git a/auto_tutorial/plot_woe_transformer.html b/auto_tutorial/plot_woe_transformer.html index f58dcf888..b6992dcac 100644 --- a/auto_tutorial/plot_woe_transformer.html +++ b/auto_tutorial/plot_woe_transformer.html @@ -1,1038 +1,892 @@

Converter for WOE#

-

WOE means Weights of Evidence. It consists in checking that -a feature X belongs to a series of regions - intervals -. -The results is the label of every intervals containing the feature.

- -
-

A simple example#

-

X is a vector made of the first ten integers. Class -WOETransformer -checks that every of them belongs to two intervals, -]1, 3[ (leftright-opened) and [5, 7] -(left-right-closed). The first interval is associated -to weight 55 and and the second one to 107.

-
import os
-import numpy as np
-import pandas as pd
-from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer
-from onnxruntime import InferenceSession
-import matplotlib.pyplot as plt
-from skl2onnx import to_onnx
-from skl2onnx.sklapi import WOETransformer
-# automatically registers the converter for WOETransformer
-import skl2onnx.sklapi.register  # noqa
-
-X = np.arange(10).astype(np.float32).reshape((-1, 1))
-
-intervals = [
-    [(1., 3., False, False),
-     (5., 7., True, True)]]
-weights = [[55, 107]]
-
-woe1 = WOETransformer(intervals, onehot=False, weights=weights)
-woe1.fit(X)
-prd = woe1.transform(X)
-df = pd.DataFrame({'X': X.ravel(), 'woe': prd.ravel()})
-df
-
-
-
-
Xwoe
00.00.0
11.00.0
22.055.0
33.00.0
44.00.0
55.0107.0
66.0107.0
77.0107.0
88.00.0
99.00.0
-
-
-
-
-
-

One Hot#

-

The transformer outputs one column with the weights. -But it could return one column per interval.

-
woe2 = WOETransformer(intervals, onehot=True, weights=weights)
-woe2.fit(X)
-prd = woe2.transform(X)
-df = pd.DataFrame(prd)
-df.columns = ['I1', 'I2']
-df['X'] = X
-df
-
-
-
-
I1I2X
00.00.00.0
10.00.01.0
255.00.02.0
30.00.03.0
40.00.04.0
50.0107.05.0
60.0107.06.0
70.0107.07.0
80.00.08.0
90.00.09.0
-
-
-
-

In that case, weights can be omitted. -The output is binary.

-
woe = WOETransformer(intervals, onehot=True)
-woe.fit(X)
-prd = woe.transform(X)
-df = pd.DataFrame(prd)
-df.columns = ['I1', 'I2']
-df['X'] = X
-df
-
-
-
-
I1I2X
00.00.00.0
10.00.01.0
21.00.02.0
30.00.03.0
40.00.04.0
50.01.05.0
60.01.06.0
70.01.07.0
80.00.08.0
90.00.09.0
-
-
-
-
-
-

Conversion to ONNX#

-

skl2onnx implements a converter for all cases.

-

onehot=False

-
onx1 = to_onnx(woe1, X)
-sess = InferenceSession(onx1.SerializeToString())
-print(sess.run(None, {'X': X})[0])
-
-
-

Out:

-
[[  0.]
- [  0.]
- [ 55.]
- [  0.]
- [  0.]
- [107.]
- [107.]
- [107.]
- [  0.]
- [  0.]]
-
-
-

onehot=True

-
onx2 = to_onnx(woe2, X)
-sess = InferenceSession(onx2.SerializeToString())
-print(sess.run(None, {'X': X})[0])
-
-
-

Out:

-
[[  0.   0.]
- [  0.   0.]
- [ 55.   0.]
- [  0.   0.]
- [  0.   0.]
- [  0. 107.]
- [  0. 107.]
- [  0. 107.]
- [  0.   0.]
- [  0.   0.]]
-
-
-
-
-

ONNX Graphs#

-

onehot=False

-
pydot_graph = GetPydotGraph(
-    onx1.graph, name=onx1.graph.name, rankdir="TB",
-    node_producer=GetOpNodeProducer(
-        "docstring", color="yellow", fillcolor="yellow", style="filled"))
-pydot_graph.write_dot("woe1.dot")
-
-os.system('dot -O -Gdpi=300 -Tpng woe1.dot')
-
-image = plt.imread("woe1.dot.png")
-fig, ax = plt.subplots(figsize=(10, 10))
-ax.imshow(image)
-ax.axis('off')
-
-
-plot woe transformer

Out:

-
(-0.5, 2129.5, 3321.5, -0.5)
-
-
-

onehot=True

-
pydot_graph = GetPydotGraph(
-    onx2.graph, name=onx2.graph.name, rankdir="TB",
-    node_producer=GetOpNodeProducer(
-        "docstring", color="yellow", fillcolor="yellow", style="filled"))
-pydot_graph.write_dot("woe2.dot")
-
-os.system('dot -O -Gdpi=300 -Tpng woe2.dot')
-
-image = plt.imread("woe2.dot.png")
-fig, ax = plt.subplots(figsize=(10, 10))
-ax.imshow(image)
-ax.axis('off')
-
-
-plot woe transformer

Out:

-
(-0.5, 2272.5, 5696.5, -0.5)
-
-
-
-
-

Half-line#

-

An interval may have only one extremity defined and the other -can be infinite.

-
intervals = [
-    [(-np.inf, 3., True, True),
-     (5., np.inf, True, True)]]
-weights = [[55, 107]]
-
-woe1 = WOETransformer(intervals, onehot=False, weights=weights)
-woe1.fit(X)
-prd = woe1.transform(X)
-df = pd.DataFrame({'X': X.ravel(), 'woe': prd.ravel()})
-df
-
-
-
-
Xwoe
00.055.0
11.055.0
22.055.0
33.055.0
44.00.0
55.0107.0
66.0107.0
77.0107.0
88.0107.0
99.0107.0
-
-
-
-

And the conversion to ONNX using the same instruction.

-
onxinf = to_onnx(woe1, X)
-sess = InferenceSession(onxinf.SerializeToString())
-print(sess.run(None, {'X': X})[0])
-
-
-

Out:

-
[[ 55.]
- [ 55.]
- [ 55.]
- [ 55.]
- [  0.]
- [107.]
- [107.]
- [107.]
- [107.]
- [107.]]
-
-
-

Total running time of the script: ( 0 minutes 2.833 seconds)

- -

Gallery generated by Sphinx-Gallery

+

Converter for WOE#

+

WOE means Weights of Evidence. It consists of checking whether a feature X belongs to a series of regions, or intervals. The result is the label of every interval containing the feature.

+
+

A simple example#

+

X is a vector made of the first ten integers. Class WOETransformer checks whether each of them belongs to two intervals, ]1, 3[ (left- and right-open) and [5, 7] (left- and right-closed). The first interval is associated with weight 55 and the second one with 107.

+
import os
+import numpy as np
+import pandas as pd
+from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer
+from onnxruntime import InferenceSession
+import matplotlib.pyplot as plt
+from skl2onnx import to_onnx
+from skl2onnx.sklapi import WOETransformer
+# automatically registers the converter for WOETransformer
+import skl2onnx.sklapi.register  # noqa
+
+X = np.arange(10).astype(np.float32).reshape((-1, 1))
+
+intervals = [
+    [(1., 3., False, False),
+     (5., 7., True, True)]]
+weights = [[55, 107]]
+
+woe1 = WOETransformer(intervals, onehot=False, weights=weights)
+woe1.fit(X)
+prd = woe1.transform(X)
+df = pd.DataFrame({'X': X.ravel(), 'woe': prd.ravel()})
+df
+
+
+
+
     X    woe
0  0.0    0.0
1  1.0    0.0
2  2.0   55.0
3  3.0    0.0
4  4.0    0.0
5  5.0  107.0
6  6.0  107.0
7  7.0  107.0
8  8.0    0.0
9  9.0    0.0
+
+
+
+
+
+

One Hot#

+

The transformer outputs one column with the weights. +But it could return one column per interval.

+
woe2 = WOETransformer(intervals, onehot=True, weights=weights)
+woe2.fit(X)
+prd = woe2.transform(X)
+df = pd.DataFrame(prd)
+df.columns = ['I1', 'I2']
+df['X'] = X
+df
+
+
+
+
     I1     I2    X
0   0.0    0.0  0.0
1   0.0    0.0  1.0
2  55.0    0.0  2.0
3   0.0    0.0  3.0
4   0.0    0.0  4.0
5   0.0  107.0  5.0
6   0.0  107.0  6.0
7   0.0  107.0  7.0
8   0.0    0.0  8.0
9   0.0    0.0  9.0
+
+
+
+

In that case, weights can be omitted. +The output is binary.

+
woe = WOETransformer(intervals, onehot=True)
+woe.fit(X)
+prd = woe.transform(X)
+df = pd.DataFrame(prd)
+df.columns = ['I1', 'I2']
+df['X'] = X
+df
+
+
+
+
    I1   I2    X
0  0.0  0.0  0.0
1  0.0  0.0  1.0
2  1.0  0.0  2.0
3  0.0  0.0  3.0
4  0.0  0.0  4.0
5  0.0  1.0  5.0
6  0.0  1.0  6.0
7  0.0  1.0  7.0
8  0.0  0.0  8.0
9  0.0  0.0  9.0
+
+
+
+
+
+

Conversion to ONNX#

+

skl2onnx implements a converter for all cases.

+

onehot=False

+
onx1 = to_onnx(woe1, X)
+sess = InferenceSession(onx1.SerializeToString())
+print(sess.run(None, {'X': X})[0])
+
+
+
[[  0.]
+ [  0.]
+ [ 55.]
+ [  0.]
+ [  0.]
+ [107.]
+ [107.]
+ [107.]
+ [  0.]
+ [  0.]]
+
+
+

onehot=True

+
onx2 = to_onnx(woe2, X)
+sess = InferenceSession(onx2.SerializeToString())
+print(sess.run(None, {'X': X})[0])
+
+
+
[[  0.   0.]
+ [  0.   0.]
+ [ 55.   0.]
+ [  0.   0.]
+ [  0.   0.]
+ [  0. 107.]
+ [  0. 107.]
+ [  0. 107.]
+ [  0.   0.]
+ [  0.   0.]]
+
+
+
+
+

ONNX Graphs#

+

onehot=False

+
pydot_graph = GetPydotGraph(
+    onx1.graph, name=onx1.graph.name, rankdir="TB",
+    node_producer=GetOpNodeProducer(
+        "docstring", color="yellow", fillcolor="yellow", style="filled"))
+pydot_graph.write_dot("woe1.dot")
+
+os.system('dot -O -Gdpi=300 -Tpng woe1.dot')
+
+image = plt.imread("woe1.dot.png")
+fig, ax = plt.subplots(figsize=(10, 10))
+ax.imshow(image)
+ax.axis('off')
+
+
+plot woe transformer
(-0.5, 2674.5, 3321.5, -0.5)
+
+
+

onehot=True

+
pydot_graph = GetPydotGraph(
+    onx2.graph, name=onx2.graph.name, rankdir="TB",
+    node_producer=GetOpNodeProducer(
+        "docstring", color="yellow", fillcolor="yellow", style="filled"))
+pydot_graph.write_dot("woe2.dot")
+
+os.system('dot -O -Gdpi=300 -Tpng woe2.dot')
+
+image = plt.imread("woe2.dot.png")
+fig, ax = plt.subplots(figsize=(10, 10))
+ax.imshow(image)
+ax.axis('off')
+
+
+plot woe transformer
(-0.5, 2743.5, 5696.5, -0.5)
+
+
+
+
+

Half-line#

+

An interval may have only one extremity defined and the other +can be infinite.

+
intervals = [
+    [(-np.inf, 3., True, True),
+     (5., np.inf, True, True)]]
+weights = [[55, 107]]
+
+woe1 = WOETransformer(intervals, onehot=False, weights=weights)
+woe1.fit(X)
+prd = woe1.transform(X)
+df = pd.DataFrame({'X': X.ravel(), 'woe': prd.ravel()})
+df
+
+
+
+
     X    woe
0  0.0   55.0
1  1.0   55.0
2  2.0   55.0
3  3.0   55.0
4  4.0    0.0
5  5.0  107.0
6  6.0  107.0
7  7.0  107.0
8  8.0  107.0
9  9.0  107.0
+
+
+
+

And the conversion to ONNX using the same instruction.

+
onxinf = to_onnx(woe1, X)
+sess = InferenceSession(onxinf.SerializeToString())
+print(sess.run(None, {'X': X})[0])
+
+
+
[[ 55.]
+ [ 55.]
+ [ 55.]
+ [ 55.]
+ [  0.]
+ [107.]
+ [107.]
+ [107.]
+ [107.]
+ [107.]]
+
+
+

Total running time of the script: ( 0 minutes 3.361 seconds)

+ +

Gallery generated by Sphinx-Gallery

diff --git a/auto_tutorial/sg_execution_times.html b/auto_tutorial/sg_execution_times.html index 976ce474e..58ddefe76 100644 --- a/auto_tutorial/sg_execution_times.html +++ b/auto_tutorial/sg_execution_times.html @@ -1,361 +1,445 @@

Computation times#

-

01:31.345 total execution time for auto_tutorial files:


Convert a pipeline with a LightGBM regressor (plot_gexternal_lightgbm_reg.py)

00:31.683

0.0 MB

Benchmark ONNX conversion (plot_bbegin_measure_time.py)

00:31.317

0.0 MB

What is the opset number? (plot_cbegin_opset.py)

00:05.264

0.0 MB

Transfer Learning with ONNX (plot_gbegin_transfer_learning.py)

00:05.219

0.0 MB

Converter for WOE (plot_woe_transformer.py)

00:02.833

0.0 MB

A new converter with options (plot_lcustom_options.py)

00:02.376

0.0 MB

One model, many possible conversions with options (plot_dbegin_options.py)

00:01.810

0.0 MB

Black list operators when converting (plot_dbegin_options_list.py)

00:01.509

0.0 MB

Dataframe as an input (plot_gbegin_dataframe.py)

00:01.416

0.0 MB

Train and deploy a scikit-learn pipeline (plot_abegin_convert_pipeline.py)

00:01.391

0.0 MB

TfIdf and sparse matrices (plot_usparse_xgboost.py)

00:00.950

0.0 MB

Issues when switching to float (plot_ebegin_float_double.py)

00:00.825

0.0 MB

Change the number of outputs by adding a parser (plot_mcustom_parser.py)

00:00.597

0.0 MB

Convert a pipeline with a XGBoost model (plot_gexternal_xgboost.py)

00:00.586

0.0 MB

Intermediate results and investigation (plot_fbegin_investigate.py)

00:00.573

0.0 MB

Implement a new converter using other converters (plot_kcustom_converter_wrapper.py)

00:00.530

0.0 MB

Convert a pipeline with a LightGBM classifier (plot_gexternal_lightgbm.py)

00:00.510

0.0 MB

Converter for WOEEncoder from categorical_encoder (plot_catwoe_transformer.py)

00:00.459

0.0 MB

Fast design with a python runtime (plot_pextend_python_runtime.py)

00:00.441

0.0 MB

Implement a new converter (plot_icustom_converter.py)

00:00.406

0.0 MB

Choose appropriate output of a classifier (plot_dbegin_options_zipmap.py)

00:00.382

0.0 MB

Modify the ONNX graph (plot_gconverting.py)

00:00.103

0.0 MB

Store arrays in one onnx graph (plot_gbegin_cst.py)

00:00.081

0.0 MB

Two ways to implement a converter (plot_jcustom_syntax.py)

00:00.078

0.0 MB

Converter for pyod.models.iforest.IForest (plot_wext_pyod_forest.py)

00:00.006

0.0 MB

Fast runtime with onnxruntime (plot_qextend_onnxruntime.py)

00:00.000

0.0 MB

-

Computation times#

+

01:55.854 total execution time for auto_tutorial files:

+

Benchmark ONNX conversion (plot_bbegin_measure_time.py)

00:55.146

0.0 MB

Convert a pipeline with a LightGBM regressor (plot_gexternal_lightgbm_reg.py)

00:39.867

0.0 MB

Converter for WOE (plot_woe_transformer.py)

00:03.361

0.0 MB

Transfer Learning with ONNX (plot_gbegin_transfer_learning.py)

00:02.727

0.0 MB

A new converter with options (plot_lcustom_options.py)

00:01.917

0.0 MB

What is the opset number? (plot_cbegin_opset.py)

00:01.345

0.0 MB

One model, many possible conversions with options (plot_dbegin_options.py)

00:01.322

0.0 MB

Convert a pipeline with a CatBoost classifier (plot_gexternal_catboost.py)

00:01.134

0.0 MB

Dataframe as an input (plot_gbegin_dataframe.py)

00:01.130

0.0 MB

Black list operators when converting (plot_dbegin_options_list.py)

00:01.110

0.0 MB

Train and deploy a scikit-learn pipeline (plot_abegin_convert_pipeline.py)

00:01.079

0.0 MB

Issues when switching to float (plot_ebegin_float_double.py)

00:00.838

0.0 MB

TfIdf and sparse matrices (plot_usparse_xgboost.py)

00:00.807

0.0 MB

Convert a pipeline with a XGBoost model (plot_gexternal_xgboost.py)

00:00.570

0.0 MB

Convert a pipeline with a LightGBM classifier (plot_gexternal_lightgbm.py)

00:00.542

0.0 MB

Intermediate results and investigation (plot_fbegin_investigate.py)

00:00.505

0.0 MB

Converter for pyod.models.iforest.IForest (plot_wext_pyod_forest.py)

00:00.502

0.0 MB

Implement a new converter using other converters (plot_kcustom_converter_wrapper.py)

00:00.499

0.0 MB

Implement a new converter (plot_icustom_converter.py)

00:00.321

0.0 MB

Choose appropriate output of a classifier (plot_dbegin_options_zipmap.py)

00:00.267

0.0 MB

Change the number of outputs by adding a parser (plot_mcustom_parser.py)

00:00.255

0.0 MB

Fast design with a python runtime (plot_pextend_python_runtime.py)

00:00.229

0.0 MB

Converter for WOEEncoder from categorical_encoder (plot_catwoe_transformer.py)

00:00.173

0.0 MB

Modify the ONNX graph (plot_gconverting.py)

00:00.063

0.0 MB

Tricky issue when converting CountVectorizer or TfidfVectorizer (plot_ngrams.py)

00:00.047

0.0 MB

Store arrays in one onnx graph (plot_gbegin_cst.py)

00:00.040

0.0 MB

Dealing with discrepancies (tf-idf) (plot_transformer_discrepancy.py)

00:00.030

0.0 MB

Two ways to implement a converter (plot_jcustom_syntax.py)

00:00.028

0.0 MB

Fast runtime with onnxruntime (plot_qextend_onnxruntime.py)

00:00.000

0.0 MB

+
diff --git a/genindex.html b/genindex.html index fefc165a7..3cddb0569 100644 --- a/genindex.html +++ b/genindex.html @@ -1,2082 +1,2329 @@
diff --git a/index.html b/index.html index 5f8b43f16..3dd88bdc9 100644 --- a/index.html +++ b/index.html @@ -1,322 +1,420 @@

sklearn-onnx: Convert your scikit-learn model into ONNX#

-

sklearn-onnx enables you to convert models from -sklearn-learn -toolkits into ONNX.

- -

Issues, questions

-

You should look for existing issues -or submit a new one. Sources are available on -onnx/sklearn-onnx.

-

ONNX version

-

The converter can convert a model for a specific version of ONNX. -Every ONNX release is labelled with an opset number -returned by function onnx_opset_version. -This function returns the default value for parameter -target opset (parameter target_opset) if it is not specified -when converting the model. Every operator is versioned. -The library chooses the most recent version below or equal -to the targetted opset number for every operator. -The ONNX model has one opset number for every operator domain, -this value is the maximum opset number among all -onnx nodes.

-

<<<

-
from skl2onnx import __max_supported_opset__
-print("Last supported opset:", __max_supported_opset__)
-
-
-

>>>

-
    Last supported opset: 15
-
-
-

Backend

-

sklearn-onnx converts models in ONNX format which -can be then used to compute predictions with the -backend of your choice. However, there exists a way -to automatically check every converter with -onnxruntime, -onnxruntime-gpu. -Every converter is tested with this backend.

-
# Train a model.
-from sklearn.datasets import load_iris
-from sklearn.model_selection import train_test_split
-from sklearn.ensemble import RandomForestClassifier
-iris = load_iris()
-X, y = iris.data, iris.target
-X_train, X_test, y_train, y_test = train_test_split(X, y)
-clr = RandomForestClassifier()
-clr.fit(X_train, y_train)
-
-# Convert into ONNX format
-from skl2onnx import convert_sklearn
-from skl2onnx.common.data_types import FloatTensorType
-initial_type = [('float_input', FloatTensorType([None, 4]))]
-onx = convert_sklearn(clr, initial_types=initial_type)
-with open("rf_iris.onnx", "wb") as f:
-    f.write(onx.SerializeToString())
-
-# Compute the prediction with ONNX Runtime
-import onnxruntime as rt
-import numpy
-sess = rt.InferenceSession("rf_iris.onnx")
-input_name = sess.get_inputs()[0].name
-label_name = sess.get_outputs()[0].name
-pred_onx = sess.run([label_name], {input_name: X_test.astype(numpy.float32)})[0]
-
-
-

Related converters

-

sklearn-onnx only converts models from scikit-learn. -onnxmltools -can be used to convert models for libsvm, lightgbm, xgboost. -Other converters can be found on github/onnx, -torch.onnx, -ONNX-MXNet API, -Microsoft.ML.Onnx

-

Credits

-

The package was started by the following engineers and data scientists at -Microsoft starting from winter 2017: Zeeshan Ahmed, Wei-Sheng Chin, Aidan Crook, -Xavier Dupré, Costin Eseanu, Tom Finley, Lixin Gong, Scott Inglis, -Pei Jiang, Ivan Matantsev, Prabhat Roy, M. Zeeshan Siddiqui, -Shouheng Yi, Shauheen Zahirazami, Yiwen Zhu, Du Li, Xuan Li, Wenbing Li.

-

License

-

It is licensed with Apache License v2.0.

-
- - -
- - - - - -
- - -
-
- - - -
-
- - - - - -
-
- + + + + + + + + + sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+ +
+ +
+ +
+
+
+

sklearn-onnx: Convert your scikit-learn model into ONNX#

+

sklearn-onnx enables you to convert models from the scikit-learn toolkit into ONNX.

+ +

Issues, questions

+

You should look for existing issues +or submit a new one. Sources are available on +onnx/sklearn-onnx.

+

ONNX version

+

The converter can convert a model for a specific version of ONNX. Every ONNX release is labelled with an opset number returned by function onnx_opset_version. This function returns the default value for parameter target_opset when it is not specified at conversion time. Every operator is versioned: the library chooses the most recent version below or equal to the targeted opset number for every operator. The ONNX model holds one opset number per operator domain; this value is the maximum opset number among all nodes of the graph.

+

<<<

+
from skl2onnx import __max_supported_opset__
+print("Last supported opset:", __max_supported_opset__)
+
+
+

>>>

+
    Last supported opset: 18
+
+
+
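The target opset can also be lowered explicitly when the model must run on an older runtime. The following lines are a minimal sketch, assuming clr is any fitted scikit-learn estimator with four numerical features (such as the random forest trained in the next section); the value 12 is only an illustrative opset.

# Sketch: convert a fitted model while targeting a specific (older) opset.
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType

onx_old = convert_sklearn(
    clr,
    initial_types=[('float_input', FloatTensorType([None, 4]))],
    target_opset=12,  # illustrative value, pick the opset your runtime supports
)
# Every operator then uses the highest version below or equal to 12.
print({d.domain or 'ai.onnx': d.version for d in onx_old.opset_import})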

Backend

+

sklearn-onnx converts models into ONNX format, which can then be used to compute predictions with the backend of your choice. There is also a way to automatically check every converter with onnxruntime or onnxruntime-gpu: every converter is tested against this backend.

+
# Train a model.
+from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
+from sklearn.ensemble import RandomForestClassifier
+iris = load_iris()
+X, y = iris.data, iris.target
+X_train, X_test, y_train, y_test = train_test_split(X, y)
+clr = RandomForestClassifier()
+clr.fit(X_train, y_train)
+
+# Convert into ONNX format
+from skl2onnx import convert_sklearn
+from skl2onnx.common.data_types import FloatTensorType
+initial_type = [('float_input', FloatTensorType([None, 4]))]
+onx = convert_sklearn(clr, initial_types=initial_type)
+with open("rf_iris.onnx", "wb") as f:
+    f.write(onx.SerializeToString())
+
+# Compute the prediction with ONNX Runtime
+import onnxruntime as rt
+import numpy
+sess = rt.InferenceSession("rf_iris.onnx", providers=["CPUExecutionProvider"])
+input_name = sess.get_inputs()[0].name
+label_name = sess.get_outputs()[0].name
+pred_onx = sess.run([label_name], {input_name: X_test.astype(numpy.float32)})[0]
+
+
+
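As a quick sanity check, the ONNX predictions can be compared with the original model's predictions. The short sketch below reuses clr, X_test and pred_onx from the code above; labels usually match exactly, although small discrepancies are possible because the ONNX graph computes with float32.

# Compare scikit-learn and onnxruntime predictions (sketch).
skl_pred = clr.predict(X_test)
agreement = (skl_pred == pred_onx.ravel()).mean()
print("agreement between scikit-learn and onnxruntime: %.1f%%" % (agreement * 100))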

Related converters

+

sklearn-onnx only converts models from scikit-learn. +onnxmltools +can be used to convert models for libsvm, lightgbm, xgboost. +Other converters can be found on github/onnx, +torch.onnx, +ONNX-MXNet API, +Microsoft.ML.Onnx

+

Credits

+

The package was started by the following engineers and data scientists at +Microsoft starting from winter 2017: Zeeshan Ahmed, Wei-Sheng Chin, Aidan Crook, +Xavier Dupré, Costin Eseanu, Tom Finley, Lixin Gong, Scott Inglis, +Pei Jiang, Ivan Matantsev, Prabhat Roy, M. Zeeshan Siddiqui, +Shouheng Yi, Shauheen Zahirazami, Yiwen Zhu, Du Li, Xuan Li, Wenbing Li.

+

License

+

It is licensed with Apache License v2.0.

+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/index_tutorial.html b/index_tutorial.html index a7f178cf8..f914e5034 100644 --- a/index_tutorial.html +++ b/index_tutorial.html @@ -1,527 +1,430 @@ - - - - - - - - - Tutorial — sklearn-onnx 1.11.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - - - - - - -
- - -
- - - -
- -
- -
- - -
- - - - - - -
- -
- -
-

Tutorial#

-

The tutorial goes from a simple example which -converts a pipeline to a more complex example -involving operator not actually implemented in -ONNX operators or ONNX ML operators.

- -

The tutorial was tested with following version:

-

<<<

-
import numpy
-import scipy
-import sklearn
-import lightgbm
-import onnx
-import onnxmltools
-import onnxruntime
-import xgboost
-import skl2onnx
-import mlprodict
-import pyquickhelper
-
-mods = [numpy, scipy, sklearn, lightgbm, xgboost,
-        onnx, onnxmltools, onnxruntime,
-        skl2onnx, mlprodict, pyquickhelper]
-mods = [(m.__name__, m.__version__) for m in mods]
-mx = max(len(_[0]) for _ in mods) + 1
-for name, vers in sorted(mods):
-    print("%s%s%s" % (name, " " * (mx - len(name)), vers))
-
-
-

>>>

-
    lightgbm      3.3.2
-    mlprodict     0.8.1697
-    numpy         1.21.3
-    onnx          1.11.0
-    onnxmltools   1.10.0
-    onnxruntime   1.10.0
-    pyquickhelper 1.11.3697
-    scipy         1.7.1
-    skl2onnx      1.11.1
-    sklearn       1.0
-    xgboost       1.5.2
-
-
-
- - -
- - - - - -
- - -
-
- - - -
-
- - - - - -
-
- + + + + + + + + + Tutorial - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+ +
+ +
+ +
+
+
+

Tutorial#

+

The tutorial goes from a simple example converting a pipeline to a more complex one involving operators that have no direct counterpart among ONNX operators or ONNX ML operators.

+ +

The tutorial was tested with the following versions:

+

<<<

+
import numpy
+import scipy
+import sklearn
+import lightgbm
+import onnx
+import onnxmltools
+import onnxruntime
+import xgboost
+import skl2onnx
+import mlprodict
+import pyquickhelper
+
+mods = [numpy, scipy, sklearn, lightgbm, xgboost,
+        onnx, onnxmltools, onnxruntime,
+        skl2onnx, mlprodict, pyquickhelper]
+mods = [(m.__name__, m.__version__) for m in mods]
+mx = max(len(_[0]) for _ in mods) + 1
+for name, vers in sorted(mods):
+    print("%s%s%s" % (name, " " * (mx - len(name)), vers))
+
+
+

>>>

+
    lightgbm      3.3.4
+    mlprodict     0.9.1887
+    numpy         1.23.5
+    onnx          1.14.0
+    onnxmltools   1.11.2
+    onnxruntime   1.15.0+cpu
+    pyquickhelper 1.11.3776
+    scipy         1.10.0
+    skl2onnx      1.14.0
+    sklearn       1.3.dev0
+    xgboost       1.7.3
+
+
+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/introduction.html b/introduction.html index 87534dcad..1bd3faa05 100644 --- a/introduction.html +++ b/introduction.html @@ -1,393 +1,474 @@ - - - - - - - - - Introduction — sklearn-onnx 1.11.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -
- - -
- - - - - - - - - - - -
- -
- -
-

Introduction#

- -
-

Quick start#

-

ONNX Runtime provides an easy way to run -machine learned models with high performance on CPU or GPU -without dependencies on the training framework. -Machine learning frameworks are usually optimized for -batch training rather than for prediction, which is a -more common scenario in applications, sites, and services. -At a high level, you can:

-
    -
  1. Train a model using your favorite framework.

  2. -
  3. Convert or export the model into ONNX format. -See ONNX Tutorials -for more details.

  4. -
  5. Load and run the model using ONNX Runtime.

  6. -
-

In this tutorial, we will briefly create a -pipeline with scikit-learn, convert it into -ONNX format and run the first predictions.

-
-

Step 1: Train a model using your favorite framework#

-

We’ll use the famous Iris datasets.

-
from sklearn.datasets import load_iris
-from sklearn.model_selection import train_test_split
-iris = load_iris()
-X, y = iris.data, iris.target
-X_train, X_test, y_train, y_test = train_test_split(X, y)
-
-from sklearn.linear_model import LogisticRegression
-clr = LogisticRegression()
-clr.fit(X_train, y_train)
-
-
-
-
-

Step 2: Convert or export the model into ONNX format#

-

ONNX is a format to describe -the machine learned model. -It defines a set of commonly used operators to compose models. -There are tools -to convert other model formats into ONNX. Here we will use -ONNXMLTools.

-
from skl2onnx import convert_sklearn
-from skl2onnx.common.data_types import FloatTensorType
-
-initial_type = [('float_input', FloatTensorType([None, 4]))]
-onx = convert_sklearn(clr, initial_types=initial_type)
-with open("logreg_iris.onnx", "wb") as f:
-    f.write(onx.SerializeToString())
-
-
-
-
-

Step 3: Load and run the model using ONNX Runtime#

-

We will use ONNX Runtime to compute the predictions -for this machine learning model.

-
import onnxruntime as rt
-sess = rt.InferenceSession("logreg_iris.onnx")
-input_name = sess.get_inputs()[0].name
-label_name = sess.get_outputs()[0].name
-
-pred_onx = sess.run([label_name], {input_name: X_test.astype(numpy.float32)})[0]
-
-
-
-
-
-

convert_sklearn, to_onnx, initial_types#

-

The module implements two functions: -convert_sklearn and -to_onnx. The first one -was used in the previous examples, it requires two -mandatory arguments:

-
    -
  • a scikit-learn model or a pipeline

  • -
  • initial types

  • -
-

scikit-learn does not store information about -the training dataset. It is not always possible to retrieve -the number of features or their types. That’s why the -function needs another argument called initial_types. -In many cases, the training datasets is a numerical matrix -X_train. Then it becomes -initial_type=[('X', FloatTensorType([None, X_train.shape[1]]))]. -X is the name of this unique input, the second term indicates the -type and shape. The shape is [None, X_train.shape[1]], -the first dimension is the number of rows followed by the -number of features. The number of rows is undefined as the -the number of requested predictions is unknown at the time -the model is converted. The number of features is usually known. -Let’s assume now the input is a string column followed by -a matrix, then initial types would be:

-
initial_type=[
-    ('S', StringTensorType([None, 1])),
-    ('X', FloatTensorType([None, X_train.shape[1]])),
-]
-
-
-

Function to_onnx was implemented -after discussions with the core developers of scikit-learn. -It also contains a mechanism to infer the proper type based on -one row of the training datasets. Then, the following code -convert_sklearn(clr, initial_types=[('X', FloatTensorType([None, 4]))]) -is usually rewritten into to_onnx(clr, X_train[:1]) where -X_train is the training dataset, it can be a matrix or a -dataframe. The input name is 'X' by default unless X_train -is a dataframe. In that case, the column names are used -as input names.

-
-
- - -
- - - - - -
- - -
-
- - - -
-
- - - - - -
-
- + + + + + + + + + Introduction - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+ +
+ +
+ +
+
+
+

Introduction#

+
+

Quick start#

+

ONNX Runtime provides an easy way to run +machine learned models with high performance on CPU or GPU +without dependencies on the training framework. +Machine learning frameworks are usually optimized for +batch training rather than for prediction, which is a +more common scenario in applications, sites, and services. +At a high level, you can:

+
    +
  1. Train a model using your favorite framework.

  2. +
  3. Convert or export the model into ONNX format. +See ONNX Tutorials +for more details.

  4. +
  5. Load and run the model using ONNX Runtime.

  6. +
+

In this tutorial, we will briefly create a +pipeline with scikit-learn, convert it into +ONNX format and run the first predictions.

+
+

Step 1: Train a model using your favorite framework#

+

We’ll use the famous Iris dataset.

+
from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
+iris = load_iris()
+X, y = iris.data, iris.target
+X_train, X_test, y_train, y_test = train_test_split(X, y)
+
+from sklearn.linear_model import LogisticRegression
+clr = LogisticRegression()
+clr.fit(X_train, y_train)
+
+
+
+
+

Step 2: Convert or export the model into ONNX format#

+

ONNX is a format to describe a machine learned model. It defines a set of commonly used operators to compose models. There are tools to convert other model formats into ONNX. Here we will use sklearn-onnx (skl2onnx).

+
from skl2onnx import convert_sklearn
+from skl2onnx.common.data_types import FloatTensorType
+
+initial_type = [('float_input', FloatTensorType([None, 4]))]
+onx = convert_sklearn(clr, initial_types=initial_type)
+with open("logreg_iris.onnx", "wb") as f:
+    f.write(onx.SerializeToString())
+
+
+
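The returned object onx is an ONNX ModelProto, so it can be inspected or validated before being saved. Below is a small optional check, assuming the onnx package is installed (it is a dependency of skl2onnx).

import onnx

# Optional sanity check: validate the generated model and list its opsets.
onnx.checker.check_model(onx)
for opset in onx.opset_import:
    print("domain=%r version=%d" % (opset.domain or "ai.onnx", opset.version))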
+
+

Step 3: Load and run the model using ONNX Runtime#

+

We will use ONNX Runtime to compute the predictions +for this machine learning model.

+
import numpy
+import onnxruntime as rt
+sess = rt.InferenceSession("logreg_iris.onnx", providers=["CPUExecutionProvider"])
+input_name = sess.get_inputs()[0].name
+label_name = sess.get_outputs()[0].name
+
+pred_onx = sess.run([label_name], {input_name: X_test.astype(numpy.float32)})[0]
+
+
+
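The converted classifier also exposes probabilities as a second output; passing None as the output list returns every output. The sketch below assumes the session created above; the exact output names may vary with converter versions.

# Retrieve every declared output (label and probabilities for a classifier).
all_outputs = sess.run(None, {input_name: X_test.astype(numpy.float32)})
print([o.name for o in sess.get_outputs()])  # e.g. ['output_label', 'output_probability']
print(all_outputs[0][:3])  # predicted labels
print(all_outputs[1][:3])  # probabilities (a list of dictionaries when ZipMap is kept)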
+
+
+

convert_sklearn, to_onnx, initial_types#

+

The module implements two functions: convert_sklearn and to_onnx. The first one was used in the previous examples; it requires two mandatory arguments:

+
    +
  • a scikit-learn model or a pipeline

  • +
  • initial types

  • +
+

scikit-learn does not store information about the training dataset. It is not always possible to retrieve the number of features or their types. That’s why the function needs another argument called initial_types. In many cases, the training dataset is a numerical matrix X_train. Then it becomes initial_type=[('X', FloatTensorType([None, X_train.shape[1]]))]. X is the name of this unique input; the second term indicates the type and shape. The shape is [None, X_train.shape[1]]: the first dimension is the number of rows, followed by the number of features. The number of rows is undefined because the number of requested predictions is unknown at the time the model is converted, while the number of features is usually known. Let’s now assume the input is a string column followed by a matrix; the initial types would then be:

+
initial_type=[
+    ('S', StringTensorType([None, 1])),
+    ('X', FloatTensorType([None, X_train.shape[1]])),
+]
+
+
+

Function to_onnx was implemented after discussions with the core developers of scikit-learn. It also contains a mechanism to infer the proper type based on one row of the training dataset. The call convert_sklearn(clr, initial_types=[('X', FloatTensorType([None, 4]))]) is then usually rewritten as to_onnx(clr, X_train[:1]), where X_train is the training dataset; it can be a matrix or a dataframe. The input name is 'X' by default unless X_train is a dataframe, in which case the column names are used as input names.
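A hedged illustration of that behaviour follows; pandas, the column names and the toy data are only assumptions made for the example.

import numpy
import pandas
from sklearn.linear_model import LogisticRegression
from skl2onnx import to_onnx

# Hypothetical training dataframe: the column names become the ONNX input names.
df = pandas.DataFrame({"sepal_length": [5.1, 6.2, 4.8, 6.0],
                       "sepal_width": [3.5, 2.9, 3.1, 2.7]}).astype(numpy.float32)
y = numpy.array([0, 1, 0, 1])
clr = LogisticRegression().fit(df, y)

# One row of the dataframe is enough to infer the input types and names.
onx = to_onnx(clr, df[:1])
print([i.name for i in onx.graph.input])  # expected: ['sepal_length', 'sepal_width']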

+
+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/objects.inv b/objects.inv index 273b2c829..295074291 100644 Binary files a/objects.inv and b/objects.inv differ diff --git a/parameterized.html b/parameterized.html index e29fafb02..7109c3a46 100644 --- a/parameterized.html +++ b/parameterized.html @@ -1,453 +1,531 @@ - - - - - - - - - Converters with options — sklearn-onnx 1.11.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -
- - -
- - - - - - - - - - - -
- -
- -
-

Converters with options#

-

Most of the converters always produce the same converted model -which computes the same outputs as the original model. -However, some of them do not and the user may need to alter the -conversion by giving additional information to the converter through -functions convert_sklearn -or to_onnx. -Every option ends up creating a different ONNX graph. -Below is the list of models which enable this mechanism.

- -
-

GaussianProcessRegressor, NearestNeighbors#

-

Both models require to compure pairwise distances. -Function onnx_cdist -produces this part of the graph but there exist two options. -The first one is using Scan operator, the second one is -using a dedicated operator called CDist which is not part -of the regular ONNX operator until issue -2442 -is addressed. By default, Scan is used, CDist can be used -by giving:

-
options={GaussianProcessRegressor: {'optim': 'cdist'}}
-
-
-

Previous line enables the optimization for every -model GaussianProcessRegressor but it can be done -only for one model by using:

-
options={id(model): {'optim': 'cdist'}}
-
-
-
-
-

TfidfVectorizer, CountVectorizer#

-
-
-skl2onnx.operator_converters.text_vectoriser.convert_sklearn_text_vectorizer(scope: skl2onnx.common._topology.Scope, operator: skl2onnx.common._topology.Operator, container: skl2onnx.common._container.ModelComponentContainer)[source]#
-

Converters for class -TfidfVectorizer. -The current implementation is a work in progress and the ONNX version -does not produce the exact same results. The converter lets the user -change some of its parameters.

-
-
tokenexp: string

The default will change to true in version 1.6.0. -The tokenizer splits into words using this regular -expression or the regular expression specified by -scikit-learn is the value is an empty string. -See also note below. -Default value: None

-
-
separators: list of separators

These separators are used to split a string into words. -Options separators is ignore if options tokenexp is not None. -Default value: [' ', '[.]', '\\?', ',', ';', ':', '\\!'].

-
-
-

Example (from TfIdfVectorizer with ONNX):

-
seps = {TfidfVectorizer: {"separators": [' ', '[.]', '\\?', ',', ';',
-                                         ':', '!', '\\(', '\\)',
-                                         '\n', '\\"', "'", "-",
-                                         "\\[", "\\]", "@"]}}
-model_onnx = convert_sklearn(pipeline, "tfidf",
-                             initial_types=[("input", StringTensorType([None, 2]))],
-                             options=seps)
-
-
-

The default regular expression of the tokenizer is (?u)\\b\\w\\w+\\b -(see re). -This expression may not supported by the library handling the backend. -onnxruntime uses -re2. You may need to switch -to a custom tokenizer based on -python wrapper for re2 -or its sources pyre2 -(syntax). -If the regular expression is not specified and if -the instance of TfidfVectorizer is using the default -pattern (?u)\\b\\w\\w+\\b, it is replaced by -[a-zA-Z0-9_]+. Any other case has to be -manually handled.

-

Regular expression [^\\\\n] is used to split -a sentance into character (and not works) if analyser=='char'. -The mode analyser=='char_wb' is not implemented.

-
-

Changed in version 1.6: Parameters have been renamed: sep into separators, -regex into tokenexp.

-
-
- -
-
-

Classifiers#

-

Converters for classifiers implement multiple options.

-
-

ZipMap#

-

The operator ZipMap produces a list of dictionaries. -It repeats class names or ids but that’s not necessary -(see issue 2149). -By default, ZipMap operator is added, it can be deactivated by using:

-
options={type(model): {'zipmap': False}}
-
-
-

It is implemented by PR 327.

-
-
-

Class information#

-

Class information is usually repeated in the ONNX operator -which classifies and the output of the ZipMap operator -(see issue 2149). -The following option can remove string information and class ids -in the ONNX operator to get smaller models.

-
options={type(model): {'nocl': True}}
-
-
-

Classes are replaced by integers from 0 to the number of classes.

-
-
-

Raw scores#

-

Almost all classifiers are converted in order to get probabilities -and not raw scores. That’s the default behaviour. It can be deactivated -by using option:

-
options={type(model): {'raw_scores': True}}
-
-
-

It is implemented by PR 308.

-
-
-
-

Pickability and Pipeline#

-

The proposed way to specify options is not always pickable. -Function id(model) depends on the execution and map an option -to one class may be not enough to customize the conversion. -However, it is possible to specify an option the same way -parameters are referenced in a scikit-learn pipeline -with method get_params. -Following syntax are supported:

-
pipe = Pipeline([('pca', PCA()), ('classifier', LogisticRegression())])
-
-options = {'classifier': {'zipmap': False}}
-
-
-

Or

-
options = {'classifier__zipmap': False}
-
-
-

Options applied to one model, not a pipeline as the converter -replaces the pipeline structure by a single onnx graph. -Following that rule, option zipmap would not have any impact -if applied to a pipeline and to the last step of the pipeline. -However, because there is no ambiguity about what the conversion -should be, for options zipmap and nocl, the following -options would have the same effect:

-
pipe = Pipeline([('pca', PCA()), ('classifier', LogisticRegression())])
-
-options = {id(pipe.steps[-1][1]): {'zipmap': False}}
-options = {id(pipe): {'zipmap': False}}
-options = {'classifier': {'zipmap': False}}
-options = {'classifier__zipmap': False}
-
-
-
-
- - -
- - - - - -
- - -
-
- - - -
-
- - - - - -
-
- + + + + + + + + + Converters with options - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+ +
+ +
+ +
+
+
+

Converters with options#

+

Most of the converters always produce the same converted model, which computes the same outputs as the original model. However, some of them do not, and the user may need to alter the conversion by giving additional information to the converter through functions convert_sklearn or to_onnx. Every option ends up creating a different ONNX graph. Below is the list of models which support this mechanism.

+
+

GaussianProcessRegressor, NearestNeighbors#

+

Both models require computing pairwise distances. Function onnx_cdist produces this part of the graph, but there exist two options. The first one uses the Scan operator; the second one uses a dedicated operator called CDist, which is not part of the standard ONNX operators until issue 2442 is addressed. By default, Scan is used; CDist can be selected by giving:

+
options={GaussianProcessRegressor: {'optim': 'cdist'}}
+
+
+

The previous line enables the optimization for every GaussianProcessRegressor model, but it can be restricted to a single model instance by using:

+
options={id(model): {'optim': 'cdist'}}
+
+
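Put together, a conversion using this option could look like the following sketch; the Iris data and the fitted regressor are only illustrative.

import numpy
from sklearn.datasets import load_iris
from sklearn.gaussian_process import GaussianProcessRegressor
from skl2onnx import to_onnx

X, y = load_iris(return_X_y=True)
X = X.astype(numpy.float32)
gpr = GaussianProcessRegressor().fit(X, y)

# 'optim': 'cdist' replaces the Scan-based pairwise distances with the CDist operator.
onx = to_onnx(gpr, X[:1], options={GaussianProcessRegressor: {'optim': 'cdist'}})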
+
+
+

TfidfVectorizer, CountVectorizer#

+
+
+skl2onnx.operator_converters.text_vectoriser.convert_sklearn_text_vectorizer(scope: Scope, operator: Operator, container: ModelComponentContainer)[source]#
+

Converters for class +TfidfVectorizer. +The current implementation is a work in progress and the ONNX version +does not produce the exact same results. The converter lets the user +change some of its parameters.

+
+

Additional options#

+
+
tokenexp: string

The default will change to true in version 1.6.0. The tokenizer splits into words using this regular expression, or the regular expression specified by scikit-learn if the value is an empty string. See also the note below. Default value: None

+
+
separators: list of separators

These separators are used to split a string into words. Option separators is ignored if option tokenexp is not None. Default value: [' ', '[.]', '\\?', ',', ';', ':', '\\!'].

+
+
+

Example (from TfIdfVectorizer with ONNX):

+
seps = {TfidfVectorizer: {"separators": [' ', '[.]', '\\?', ',', ';',
+                                         ':', '!', '\\(', '\\)',
+                                         '\n', '\\"', "'", "-",
+                                         "\\[", "\\]", "@"]}}
+model_onnx = convert_sklearn(pipeline, "tfidf",
+                             initial_types=[("input", StringTensorType([None, 2]))],
+                             options=seps)
+
+
+

The default regular expression of the tokenizer is (?u)\\b\\w\\w+\\b (see re). This expression may not be supported by the library handling the backend. onnxruntime uses re2. You may need to switch to a custom tokenizer based on the python wrapper for re2 or its sources pyre2 (syntax). If the regular expression is not specified and if the instance of TfidfVectorizer is using the default pattern (?u)\\b\\w\\w+\\b, it is replaced by [a-zA-Z0-9_]+. Any other case has to be handled manually.

+

Regular expression [^\\\\n] is used to split a sentence into characters (and not words) when analyser=='char'. The mode analyser=='char_wb' is not implemented.

+
+

Changed in version 1.6: Parameters have been renamed: sep into separators, +regex into tokenexp.

+
+
+
+ +
+
+
+

Classifiers#

+

Converters for classifiers implement multiple options.

+
+

ZipMap#

+

The operator ZipMap produces a list of dictionaries. It repeats class names or ids, but that’s not necessary (see issue 2149). By default, the ZipMap operator is added; it can be deactivated by using:

+
options={type(model): {'zipmap': False}}
+
+
+

It is implemented by PR 327.
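For instance, disabling ZipMap for a fitted classifier turns the probability output into a plain tensor instead of a list of dictionaries. A minimal sketch, with illustrative data only:

import numpy
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from skl2onnx import to_onnx

X, y = load_iris(return_X_y=True)
X = X.astype(numpy.float32)
clr = LogisticRegression(max_iter=500).fit(X, y)

# Without ZipMap, probabilities come back as a matrix of shape (n_samples, n_classes).
onx = to_onnx(clr, X[:1], options={type(clr): {'zipmap': False}})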

+
+
+

Class information#

+

Class information is usually repeated in both the ONNX classifier operator and the output of the ZipMap operator (see issue 2149). The following option removes string information and class ids from the ONNX operator to get smaller models.

+
options={type(model): {'nocl': True}}
+
+
+

Classes are replaced by integers from 0 to the number of classes.

+
+
+

Raw scores#

+

Almost all classifiers are converted in order to get probabilities +and not raw scores. That’s the default behaviour. It can be deactivated +by using option:

+
options={type(model): {'raw_scores': True}}
+
+
+

It is implemented by PR 308.
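Following the same pattern, raw scores (similar to decision_function) can be requested instead of probabilities. A short sketch reusing the classifier fitted in the ZipMap example above:

# Hedged sketch: the score output then contains raw scores rather than probabilities.
onx_raw = to_onnx(clr, X[:1],
                  options={type(clr): {'raw_scores': True, 'zipmap': False}})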

+
+
+
+

Pickability and Pipeline#

+

The proposed way to specify options is not always picklable. Function id(model) depends on the execution, and mapping an option to one class may not be enough to customize the conversion. However, it is possible to specify an option the same way parameters are referenced in a scikit-learn pipeline with method get_params. The following syntaxes are supported:

+
pipe = Pipeline([('pca', PCA()), ('classifier', LogisticRegression())])
+
+options = {'classifier': {'zipmap': False}}
+
+
+

Or

+
options = {'classifier__zipmap': False}
+
+
+

Options apply to one model, not to a pipeline, as the converter replaces the pipeline structure by a single ONNX graph. Following that rule, option zipmap would not have any impact if applied to a pipeline rather than to the last step of the pipeline. However, because there is no ambiguity about what the conversion should be, for options zipmap and nocl the following options all have the same effect:

+
pipe = Pipeline([('pca', PCA()), ('classifier', LogisticRegression())])
+
+options = {id(pipe.steps[-1][1]): {'zipmap': False}}
+options = {id(pipe): {'zipmap': False}}
+options = {'classifier': {'zipmap': False}}
+options = {'classifier__zipmap': False}
+
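A compact end-to-end sketch of the step-name syntax is shown below; the toy data and the pipeline definition are only for illustration.

import numpy
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from skl2onnx import to_onnx

X, y = load_iris(return_X_y=True)
X = X.astype(numpy.float32)
pipe = Pipeline([('pca', PCA(n_components=2)),
                 ('classifier', LogisticRegression(max_iter=500))]).fit(X, y)

# The option is routed to the 'classifier' step, like get_params/set_params parameters.
onx = to_onnx(pipe, X[:1], options={'classifier__zipmap': False})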
+
+
+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/pipeline.html b/pipeline.html index 7b21b5165..56570e7d2 100644 --- a/pipeline.html +++ b/pipeline.html @@ -1,600 +1,676 @@ - - - - - - - - - Convert a pipeline — sklearn-onnx 1.11.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -
- - -
- - - - - - - - - - - -
- -
- -
-

Convert a pipeline#

- -

skl2onnx converts any machine learning pipeline into -ONNX pipelines. Every transformer or predictors is converted -into one or multiple nodes into the ONNX graph. -Any ONNX backend -can then use this graph to compute equivalent outputs for the same inputs.

-
-

Convert complex pipelines#

-

scikit-learn introduced -ColumnTransformer -useful to build complex pipelines such as the following one:

-
from sklearn.linear_model import LogisticRegression
-from sklearn.pipeline import Pipeline
-from sklearn.preprocessing import StandardScaler, OneHotEncoder
-from sklearn.impute import SimpleImputer
-from sklearn.decomposition import TruncatedSVD
-from sklearn.compose import ColumnTransformer
-
-numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
-categorical_features = [3, 4] # ["vcat", "vcat2"]
-
-classifier = LogisticRegression(C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])),
-                                n_jobs=1, max_iter=10, solver='lbfgs', tol=1e-3)
-
-numeric_transformer = Pipeline(steps=[
-    ('imputer', SimpleImputer(strategy='median')),
-    ('scaler', StandardScaler())
-])
-
-categorical_transformer = Pipeline(steps=[
-    ('onehot', OneHotEncoder(sparse=True, handle_unknown='ignore')),
-    ('tsvd', TruncatedSVD(n_components=1, algorithm='arpack', tol=1e-4))
-])
-
-preprocessor = ColumnTransformer(
-    transformers=[
-        ('num', numeric_transformer, numeric_features),
-        ('cat', categorical_transformer, categorical_features)
-    ])
-
-model = Pipeline(steps=[
-    ('precprocessor', preprocessor),
-    ('classifier', classifier)
-])
-
-
-

Which we can represents as:

-
-

Once fitted, the model is converted into ONNX:

-
from skl2onnx import convert_sklearn
-from skl2onnx.common.data_types import FloatTensorType, StringTensorType
-
-initial_type = [('numfeat', FloatTensorType([None, 3])),
-                ('strfeat', StringTensorType([None, 2]))]
-model_onnx = convert_sklearn(model, initial_types=initial_type)
-
-
-
-

Note

-

The error AttributeError: 'ColumnTransformer' object has no attribute 'transformers_' -means the model was not trained. The converter tries to access an attribute -created by method fit.

-
-

It can be represented as a -DOT graph:

-
from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer
-pydot_graph = GetPydotGraph(model_onnx.graph, name=model_onnx.graph.name, rankdir="TP",
-                            node_producer=GetOpNodeProducer("docstring"))
-pydot_graph.write_dot("graph.dot")
-
-import os
-os.system('dot -O -Tpng graph.dot'
-
-
-_images/pipeline.png -
-
-

Parser, shape calculator, converter#

-

Three kinds of functions are involved into the conversion -of a scikit-pipeline. Each of them is called in the following -order:

-
    -
  • parser(scope, model, inputs, custom_parser): -the parser builds the expected outputs of a model, -as the resulting graph must contain unique names, -scope contains all names already given, -model is the model to convert, -inputs are the inputs the model receives -in the ONNX graph. It is a list of -Variable. -custom_parsers contains a map {model type: parser} -which extends the default list of parsers. -The parser defines default outputs for standard -machine learned problems. The shape calculator -changes the shapes and types for each of them -depending on the model and is called after all -outputs were defined (topology). This steps defines -the number of outputs and their types for every node -and sets them to a default shape [None, None] -which the output node has one row and no known -columns yet.

  • -
  • shape_calculator(model): -The shape calculator changes the shape -of the outputs created by the parser. Once this function -returned its results, the graph structure is fully defined -and cannot be changed. The shape calculator should -not change types. Many runtimes are implemented in C++ -and do not support implicit casts. A change of type -might make the runtime fail due to a type mismatch -between two consecutive nodes produces by two different -converters.

  • -
  • converter(scope, operator, container): -The converter converts the transformers or predictors into -ONNX nodes. Each node can an ONNX -operator or -ML operator or -custom ONNX operators.

  • -
-

As sklearn-onnx may convert pipelines with model coming from other libraries, -the library must handle parsers, shape calculators or converters coming -from other packages. This can be done is two ways. The first one -consists in calling function convert_sklearn -by mapping the model type to a specific parser, a specific shape calculator -or a specific converter. It is possible to avoid these specifications -by registering the new parser or shape calculator or converter -with one of the two functions -update_registered_converter, -update_registered_parser. -One example follows.

-
-
-

New converters in a pipeline#

-

Many libraries implement scikit-learn API and their models can -be included in scikit-learn pipelines. However, sklearn-onnx cannot -a pipeline which include a model such as XGBoost or LightGbm -if it does not know the corresponding converters: it needs to be registered. -That’s the purpose of function skl2onnx.update_registered_converter(). -The following example shows how to register a new converter or -or update an existing one. Four elements are registered:

-
    -
  • the model class

  • -
  • an alias, usually the class name prefixed by the library name

  • -
  • a shape calculator which computes the type and shape of the expected outputs

  • -
  • a model converter

  • -
-

The following lines shows what these four elements are for a random forest:

-
from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes
-from skl2onnx.operator_converters.RandomForest import convert_sklearn_random_forest_classifier
-from skl2onnx import update_registered_converter
-update_registered_converter(SGDClassifier, 'SklearnLinearClassifier',
-                            calculate_linear_classifier_output_shapes,
-                            convert_sklearn_random_forest_classifier)
-
-
-

See example Convert a pipeline with a LightGBM classifier to see a complete example -with a LightGbm model.

-
-
-

Titanic example#

-

The first example was a simplified pipeline coming from scikit-learn’s documentation: -Column Transformer with Mixed Types. -The full story is available in a runnable example: Convert a pipeline with ColumnTransformer -which also shows up some mistakes that a user could come accross -when trying to convert a pipeline.

-
-
-

Parameterize the conversion#

-

Most of the converter do not require specific options -to convert a scikit-learn model. It always produces the same -results. However, in some cases, the conversion cannot produce -a model which returns the exact same results. The user may want -to optimize the conversion by giving the converter additional -information, even if the model to convert is included in a -pipeline. That why the option mechanism was implemented: -Converters with options.

-
-
-

Investigate discrepencies#

-

A wrong converter may introduce introduce discrepencies -in a converter pipeline but it is not alway easy to -isolate the source of the differences. The function -collect_intermediate_steps -may then be used to investigate each component independently. -The following piece of code is extracted from unit test -test_investigate.py and converts -a pipeline and each of its components independently.

-
import numpy
-from numpy.testing import assert_almost_equal
-from sklearn.pipeline import Pipeline
-from sklearn.preprocessing import StandardScaler
-import onnxruntime
-from skl2onnx.helpers import collect_intermediate_steps, compare_objects
-from skl2onnx.common.data_types import FloatTensorType
-
-# Let's fit a model.
-data = numpy.array([[0, 0], [0, 0], [2, 1], [2, 1]],
-                   dtype=numpy.float32)
-model = Pipeline([("scaler1", StandardScaler()),
-                  ("scaler2", StandardScaler())])
-model.fit(data)
-
-# Convert and collect every operator in a pipeline
-# and modifies the current pipeline to keep
-# intermediate inputs and outputs when method
-# predict or transform is called.
-operators = collect_intermediate_steps(model, "pipeline",
-                                       [("input",
-                                         FloatTensorType([None, 2]))])
-
-# Method and transform is called.
-model.transform(data)
-
-# Loop on every operator.
-for op in operators:
-
-    # The ONNX for this operator.
-    onnx_step = op['onnx_step']
-
-    # Use onnxruntime to compute ONNX outputs
-    sess = onnxruntime.InferenceSession(onnx_step.SerializeToString())
-
-    # Let's use the initial data as the ONNX model
-    # contains all nodes from the first inputs to this node.
-    onnx_outputs = sess.run(None, {'input': data})
-    onnx_output = onnx_outputs[0]
-    skl_outputs = op['model']._debug.outputs['transform']
-
-    # Compares the outputs between scikit-learn and onnxruntime.
-    assert_almost_equal(onnx_output, skl_outputs)
-
-    # A function which is able to deal with different types.
-    compare_objects(onnx_output, skl_outputs)
-
-
-
-
-

Investigate missing converters#

-

Many converters can be missing before converting a pipeline. -Exception MissingShapeCalculator is -raised when the first missing one is found. -The previous snippet of code can be modified to find all of -them.

-
import numpy
-from numpy.testing import assert_almost_equal
-from sklearn.pipeline import Pipeline
-from sklearn.preprocessing import StandardScaler
-import onnxruntime
-from skl2onnx.common.data_types import guess_data_type
-from skl2onnx.common.exceptions import MissingShapeCalculator
-from skl2onnx.helpers import collect_intermediate_steps, compare_objects, enumerate_pipeline_models
-from skl2onnx.helpers.investigate import _alter_model_for_debugging
-from skl2onnx import convert_sklearn
-
-class MyScaler(StandardScaler):
-    pass
-
-# Let's fit a model.
-data = numpy.array([[0, 0], [0, 0], [2, 1], [2, 1]],
-                   dtype=numpy.float32)
-model = Pipeline([("scaler1", StandardScaler()),
-                  ("scaler2", StandardScaler()),
-                  ("scaler3", MyScaler()),
-                ])
-model.fit(data)
-
-# This function alters the pipeline, every time
-# methods transform or predict are used, inputs and outputs
-# are stored in every operator.
-_alter_model_for_debugging(model, recursive=True)
-
-# Let's use the pipeline and keep intermediate
-# inputs and outputs.
-model.transform(data)
-
-# Let's get the list of all operators to convert
-# and independently process them.
-all_models = list(enumerate_pipeline_models(model))
-
-# Loop on every operator.
-for ind, op, last in all_models:
-    if ind == (0,):
-        # whole pipeline
-        continue
-
-    # The dump input data for this operator.
-    data_in = op._debug.inputs['transform']
-
-    # Let's infer some initial shape.
-    t = guess_data_type(data_in)
-
-    # Let's convert.
-    try:
-        onnx_step = convert_sklearn(op, initial_types=t)
-    except MissingShapeCalculator as e:
-        if "MyScaler" in str(e):
-            print(e)
-            continue
-        raise
-
-    # If it does not fail, let's compare the ONNX outputs with
-    # the original operator.
-    sess = onnxruntime.InferenceSession(onnx_step.SerializeToString())
-    onnx_outputs = sess.run(None, {'input': data_in})
-    onnx_output = onnx_outputs[0]
-    skl_outputs = op._debug.outputs['transform']
-    assert_almost_equal(onnx_output, skl_outputs)
-    compare_objects(onnx_output, skl_outputs)
-
-
-
-
- - -
- - - - - -
- - -
-
- - - -
-
- - - - - -
-
- + + + + + + + + + Convert a pipeline - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+ +
+ +
+ +
+
+
+

Convert a pipeline#

+

skl2onnx converts any machine learning pipeline into an ONNX pipeline. Every transformer or predictor is converted into one or multiple nodes of the ONNX graph. Any ONNX backend can then use this graph to compute equivalent outputs for the same inputs.

+
+

Convert complex pipelines#

+

scikit-learn introduced ColumnTransformer, which is useful to build complex pipelines such as the following one:

+
from sklearn.linear_model import LogisticRegression
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import StandardScaler, OneHotEncoder
+from sklearn.impute import SimpleImputer
+from sklearn.decomposition import TruncatedSVD
+from sklearn.compose import ColumnTransformer
+
+numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
+categorical_features = [3, 4] # ["vcat", "vcat2"]
+
+classifier = LogisticRegression(C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])),
+                                n_jobs=1, max_iter=10, solver='lbfgs', tol=1e-3)
+
+numeric_transformer = Pipeline(steps=[
+    ('imputer', SimpleImputer(strategy='median')),
+    ('scaler', StandardScaler())
+])
+
+categorical_transformer = Pipeline(steps=[
+    ('onehot', OneHotEncoder(sparse=True, handle_unknown='ignore')),
+    ('tsvd', TruncatedSVD(n_components=1, algorithm='arpack', tol=1e-4))
+])
+
+preprocessor = ColumnTransformer(
+    transformers=[
+        ('num', numeric_transformer, numeric_features),
+        ('cat', categorical_transformer, categorical_features)
+    ])
+
+model = Pipeline(steps=[
+    ('precprocessor', preprocessor),
+    ('classifier', classifier)
+])
+
+
+

Which we can represent as:

+
+

Once fitted, the model is converted into ONNX:

+
from skl2onnx import convert_sklearn
+from skl2onnx.common.data_types import FloatTensorType, StringTensorType
+
+initial_type = [('numfeat', FloatTensorType([None, 3])),
+                ('strfeat', StringTensorType([None, 2]))]
+model_onnx = convert_sklearn(model, initial_types=initial_type)
+
+
+
+

Note

+

The error AttributeError: 'ColumnTransformer' object has no attribute 'transformers_' +means the model was not trained. The converter tries to access an attribute +created by method fit.

+
+

It can be represented as a +DOT graph:

+
from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer
+pydot_graph = GetPydotGraph(model_onnx.graph, name=model_onnx.graph.name, rankdir="TP",
+                            node_producer=GetOpNodeProducer("docstring"))
+pydot_graph.write_dot("graph.dot")
+
+import os
+os.system('dot -O -Tpng graph.dot')
+
+
+_images/pipeline.png +
+
+

Parser, shape calculator, converter#

+

Three kinds of functions are involved in the conversion of a scikit-learn pipeline. Each of them is called in the following order:

+
    +
  • parser(scope, model, inputs, custom_parsers): the parser builds the expected outputs of a model. As the resulting graph must contain unique names, scope contains all names already given, model is the model to convert, and inputs are the inputs the model receives in the ONNX graph; it is a list of Variable. custom_parsers contains a map {model type: parser} which extends the default list of parsers. The parser defines default outputs for standard machine learning problems. The shape calculator changes the shapes and types for each of them depending on the model and is called after all outputs were defined (topology). This step defines the number of outputs and their types for every node and sets them to a default shape [None, None]: neither the number of rows nor the number of columns is known yet.

  • +
  • shape_calculator(model): the shape calculator changes the shape of the outputs created by the parser. Once this function has returned its results, the graph structure is fully defined and cannot be changed. The shape calculator should not change types: many runtimes are implemented in C++ and do not support implicit casts, and a change of type might make the runtime fail due to a type mismatch between two consecutive nodes produced by two different converters.

  • +
  • converter(scope, operator, container): the converter converts the transformers or predictors into ONNX nodes. Each node can be a standard ONNX operator, an ONNX ML operator, or a custom ONNX operator.

  • +
+

As sklearn-onnx may convert pipelines with models coming from other libraries, the library must handle parsers, shape calculators or converters coming from other packages. This can be done in two ways. The first one consists in calling function convert_sklearn with a mapping from the model type to a specific parser, a specific shape calculator or a specific converter. These specifications can be avoided by registering the new parser, shape calculator or converter with one of the two functions update_registered_converter, update_registered_parser. One example follows.

+
+
+

New converters in a pipeline#

+

Many libraries implement the scikit-learn API and their models can be included in scikit-learn pipelines. However, sklearn-onnx cannot convert a pipeline which includes a model such as XGBoost or LightGBM if it does not know the corresponding converter: it needs to be registered. That’s the purpose of function skl2onnx.update_registered_converter(). The following example shows how to register a new converter or update an existing one. Four elements are registered:

+
    +
  • the model class

  • +
  • an alias, usually the class name prefixed by the library name

  • +
  • a shape calculator which computes the type and shape of the expected outputs

  • +
  • a model converter

  • +
+

The following lines show what these four elements are for a random forest:

+
from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes
+from skl2onnx.operator_converters.RandomForest import convert_sklearn_random_forest_classifier
+from skl2onnx import update_registered_converter
+update_registered_converter(SGDClassifier, 'SklearnLinearClassifier',
+                            calculate_linear_classifier_output_shapes,
+                            convert_sklearn_random_forest_classifier)
+
+
+

See example Convert a pipeline with a LightGBM classifier to see a complete example +with a LightGbm model.
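A condensed sketch of such a registration for LightGBM follows; it reuses the shape calculator for linear classifiers and the LightGBM converter shipped with onnxmltools, both of which are assumed to be installed.

from lightgbm import LGBMClassifier
from onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm
from skl2onnx import update_registered_converter
from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes

# Register the converter once; every pipeline containing an LGBMClassifier
# can then be converted with convert_sklearn or to_onnx.
update_registered_converter(
    LGBMClassifier, 'LightGbmLGBMClassifier',
    calculate_linear_classifier_output_shapes, convert_lightgbm,
    options={'nocl': [True, False], 'zipmap': [True, False, 'columns']})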

+
+
+

Titanic example#

+

The first example was a simplified pipeline coming from scikit-learn’s documentation: Column Transformer with Mixed Types. The full story is available in a runnable example, Convert a pipeline with ColumnTransformer, which also shows some mistakes a user could come across when trying to convert a pipeline.

+
+
+

Parameterize the conversion#

+

Most of the converters do not require specific options to convert a scikit-learn model; they always produce the same results. However, in some cases, the conversion cannot produce a model which returns the exact same results. The user may want to optimize the conversion by giving the converter additional information, even if the model to convert is included in a pipeline. That’s why the option mechanism was implemented: Converters with options.

+
+
+

Investigate discrepencies#

+

A wrong converter may introduce discrepancies in a converted pipeline, but it is not always easy to isolate the source of the differences. The function collect_intermediate_steps may then be used to investigate each component independently. The following piece of code is extracted from unit test test_investigate.py and converts a pipeline and each of its components independently.

+
import numpy
+from numpy.testing import assert_almost_equal
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import StandardScaler
+import onnxruntime
+from skl2onnx.helpers import collect_intermediate_steps, compare_objects
+from skl2onnx.common.data_types import FloatTensorType
+
+# Let's fit a model.
+data = numpy.array([[0, 0], [0, 0], [2, 1], [2, 1]],
+                   dtype=numpy.float32)
+model = Pipeline([("scaler1", StandardScaler()),
+                  ("scaler2", StandardScaler())])
+model.fit(data)
+
+# Convert and collect every operator in a pipeline
+# and modifies the current pipeline to keep
+# intermediate inputs and outputs when method
+# predict or transform is called.
+operators = collect_intermediate_steps(model, "pipeline",
+                                       [("input",
+                                         FloatTensorType([None, 2]))])
+
+# Method and transform is called.
+model.transform(data)
+
+# Loop on every operator.
+for op in operators:
+
+    # The ONNX for this operator.
+    onnx_step = op['onnx_step']
+
+    # Use onnxruntime to compute ONNX outputs
+    sess = onnxruntime.InferenceSession(onnx_step.SerializeToString(),
+                                        providers=["CPUExecutionProvider"])
+
+    # Let's use the initial data as the ONNX model
+    # contains all nodes from the first inputs to this node.
+    onnx_outputs = sess.run(None, {'input': data})
+    onnx_output = onnx_outputs[0]
+    skl_outputs = op['model']._debug.outputs['transform']
+
+    # Compares the outputs between scikit-learn and onnxruntime.
+    assert_almost_equal(onnx_output, skl_outputs)
+
+    # A function which is able to deal with different types.
+    compare_objects(onnx_output, skl_outputs)
+
+
+
+
+

Investigate missing converters#

+

Several converters may be missing when converting a pipeline. Exception MissingShapeCalculator is raised when the first missing one is found. The previous snippet of code can be modified to find all of them.

+
import numpy
+from numpy.testing import assert_almost_equal
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import StandardScaler
+import onnxruntime
+from skl2onnx.common.data_types import guess_data_type
+from skl2onnx.common.exceptions import MissingShapeCalculator
+from skl2onnx.helpers import collect_intermediate_steps, compare_objects, enumerate_pipeline_models
+from skl2onnx.helpers.investigate import _alter_model_for_debugging
+from skl2onnx import convert_sklearn
+
+class MyScaler(StandardScaler):
+    pass
+
+# Let's fit a model.
+data = numpy.array([[0, 0], [0, 0], [2, 1], [2, 1]],
+                   dtype=numpy.float32)
+model = Pipeline([("scaler1", StandardScaler()),
+                  ("scaler2", StandardScaler()),
+                  ("scaler3", MyScaler()),
+                ])
+model.fit(data)
+
+# This function alters the pipeline, every time
+# methods transform or predict are used, inputs and outputs
+# are stored in every operator.
+_alter_model_for_debugging(model, recursive=True)
+
+# Let's use the pipeline and keep intermediate
+# inputs and outputs.
+model.transform(data)
+
+# Let's get the list of all operators to convert
+# and independently process them.
+all_models = list(enumerate_pipeline_models(model))
+
+# Loop on every operator.
+for ind, op, last in all_models:
+    if ind == (0,):
+        # whole pipeline
+        continue
+
+    # The dump input data for this operator.
+    data_in = op._debug.inputs['transform']
+
+    # Let's infer some initial shape.
+    t = guess_data_type(data_in)
+
+    # Let's convert.
+    try:
+        onnx_step = convert_sklearn(op, initial_types=t)
+    except MissingShapeCalculator as e:
+        if "MyScaler" in str(e):
+            print(e)
+            continue
+        raise
+
+    # If it does not fail, let's compare the ONNX outputs with
+    # the original operator.
+    sess = onnxruntime.InferenceSession(onnx_step.SerializeToString(),
+                                        providers=["CPUExecutionProvider"])
+    onnx_outputs = sess.run(None, {'input': data_in})
+    onnx_output = onnx_outputs[0]
+    skl_outputs = op._debug.outputs['transform']
+    assert_almost_equal(onnx_output, skl_outputs)
+    compare_objects(onnx_output, skl_outputs)
+
+
+
+
+ +
+
+ +
+ +
+
+ + + + \ No newline at end of file diff --git a/search.html b/search.html index fffdf2b51..0c283aa2a 100644 --- a/search.html +++ b/search.html @@ -1,241 +1,329 @@ - - - - - - - - Search — sklearn-onnx 1.11.1 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -
- - -
- - - - -
- -
- - - - - - -
- -
- -

Search

- - - - -

- Searching for multiple words only shows matches that contain - all words. -

- - -
- - - -
- - - -
- -
- - -
- - - -
-
- -
- - -
-
- - - -
-
- - - - - -
-
- + + + + + + + Search - sklearn-onnx 1.14.0 documentation + + + + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark mode + + + + + + + + + + + + + + + + + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+ + + +
+ +
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/searchindex.js b/searchindex.js index b7c760634..08a0a7efa 100644 --- a/searchindex.js +++ b/searchindex.js @@ -1 +1 @@ -Search.setIndex({docnames:["api_summary","auto_examples/index","auto_examples/plot_backend","auto_examples/plot_benchmark_cdist","auto_examples/plot_benchmark_pipeline","auto_examples/plot_black_op","auto_examples/plot_cast_transformer","auto_examples/plot_complex_pipeline","auto_examples/plot_convert_decision_function","auto_examples/plot_convert_model","auto_examples/plot_convert_syntax","auto_examples/plot_convert_zipmap","auto_examples/plot_custom_model","auto_examples/plot_custom_parser","auto_examples/plot_custom_parser_alternative","auto_examples/plot_errors_onnxruntime","auto_examples/plot_gpr","auto_examples/plot_intermediate_outputs","auto_examples/plot_investigate_pipeline","auto_examples/plot_logging","auto_examples/plot_metadata","auto_examples/plot_nmf","auto_examples/plot_onnx_operators","auto_examples/plot_pipeline","auto_examples/plot_pipeline_lightgbm","auto_examples/plot_pipeline_xgboost","auto_examples/plot_tfidfvectorizer","auto_examples/sg_execution_times","auto_tutorial/index","auto_tutorial/plot_abegin_convert_pipeline","auto_tutorial/plot_bbegin_measure_time","auto_tutorial/plot_catwoe_transformer","auto_tutorial/plot_cbegin_opset","auto_tutorial/plot_dbegin_options","auto_tutorial/plot_dbegin_options_list","auto_tutorial/plot_dbegin_options_zipmap","auto_tutorial/plot_ebegin_float_double","auto_tutorial/plot_fbegin_investigate","auto_tutorial/plot_gbegin_cst","auto_tutorial/plot_gbegin_dataframe","auto_tutorial/plot_gbegin_transfer_learning","auto_tutorial/plot_gconverting","auto_tutorial/plot_gexternal_lightgbm","auto_tutorial/plot_gexternal_lightgbm_reg","auto_tutorial/plot_gexternal_xgboost","auto_tutorial/plot_icustom_converter","auto_tutorial/plot_jcustom_syntax","auto_tutorial/plot_kcustom_converter_wrapper","auto_tutorial/plot_lcustom_options","auto_tutorial/plot_mcustom_parser","auto_tutorial/plot_pextend_python_runtime","auto_tutorial/plot_qextend_onnxruntime","auto_tutorial/plot_usparse_xgboost","auto_tutorial/plot_wext_pyod_forest","auto_tutorial/plot_woe_transformer","auto_tutorial/sg_execution_times","index","index_tutorial","introduction","parameterized","pipeline","supported","tutorial_1-5_external","tutorial_1_simple","tutorial_2-5_extlib","tutorial_2_new_converter","tutorial_3_new_operator","tutorial_4_advanced"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":5,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":3,"sphinx.domains.rst":2,"sphinx.domains.std":2,"sphinx.ext.intersphinx":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["api_summary.rst","auto_examples\\index.rst","auto_examples\\plot_backend.rst","auto_examples\\plot_benchmark_cdist.rst","auto_examples\\plot_benchmark_pipeline.rst","auto_examples\\plot_black_op.rst","auto_examples\\plot_cast_transformer.rst","auto_examples\\plot_complex_pipeline.rst","auto_examples\\plot_convert_decision_function.rst","auto_examples\\plot_convert_model.rst","auto_examples\\plot_convert_syntax.rst","auto_examples\\plot_convert_zipmap.rst","auto_examples\\plot_custom_model.rst","auto_examples\\plot_custom_parser.rst","auto_examples\\plot_custom_parser_alternative.rst","auto_examples\\plot_errors_onnxruntime.rst","auto_examples\\plot_gpr.rst","auto_examples\\plot_intermediate_outputs.rst","auto_examples\\plot_investigate
_pipeline.rst","auto_examples\\plot_logging.rst","auto_examples\\plot_metadata.rst","auto_examples\\plot_nmf.rst","auto_examples\\plot_onnx_operators.rst","auto_examples\\plot_pipeline.rst","auto_examples\\plot_pipeline_lightgbm.rst","auto_examples\\plot_pipeline_xgboost.rst","auto_examples\\plot_tfidfvectorizer.rst","auto_examples\\sg_execution_times.rst","auto_tutorial\\index.rst","auto_tutorial\\plot_abegin_convert_pipeline.rst","auto_tutorial\\plot_bbegin_measure_time.rst","auto_tutorial\\plot_catwoe_transformer.rst","auto_tutorial\\plot_cbegin_opset.rst","auto_tutorial\\plot_dbegin_options.rst","auto_tutorial\\plot_dbegin_options_list.rst","auto_tutorial\\plot_dbegin_options_zipmap.rst","auto_tutorial\\plot_ebegin_float_double.rst","auto_tutorial\\plot_fbegin_investigate.rst","auto_tutorial\\plot_gbegin_cst.rst","auto_tutorial\\plot_gbegin_dataframe.rst","auto_tutorial\\plot_gbegin_transfer_learning.rst","auto_tutorial\\plot_gconverting.rst","auto_tutorial\\plot_gexternal_lightgbm.rst","auto_tutorial\\plot_gexternal_lightgbm_reg.rst","auto_tutorial\\plot_gexternal_xgboost.rst","auto_tutorial\\plot_icustom_converter.rst","auto_tutorial\\plot_jcustom_syntax.rst","auto_tutorial\\plot_kcustom_converter_wrapper.rst","auto_tutorial\\plot_lcustom_options.rst","auto_tutorial\\plot_mcustom_parser.rst","auto_tutorial\\plot_pextend_python_runtime.rst","auto_tutorial\\plot_qextend_onnxruntime.rst","auto_tutorial\\plot_usparse_xgboost.rst","auto_tutorial\\plot_wext_pyod_forest.rst","auto_tutorial\\plot_woe_transformer.rst","auto_tutorial\\sg_execution_times.rst","index.rst","index_tutorial.rst","introduction.rst","parameterized.rst","pipeline.rst","supported.rst","tutorial_1-5_external.rst","tutorial_1_simple.rst","tutorial_2-5_extlib.rst","tutorial_2_new_converter.rst","tutorial_3_new_operator.rst","tutorial_4_advanced.rst"],objects:{"skl2onnx._parse":[[0,0,1,"","parse_sklearn"],[0,0,1,"","parse_sklearn_model"]],"skl2onnx.algebra.onnx_ops":[[61,1,1,"","OnnxAbs"],[61,1,1,"","OnnxAbs_1"],[61,1,1,"","OnnxAbs_13"],[61,1,1,"","OnnxAbs_6"],[61,1,1,"","OnnxAcos"],[61,1,1,"","OnnxAcos_7"],[61,1,1,"","OnnxAcosh"],[61,1,1,"","OnnxAcosh_9"],[61,1,1,"","OnnxAdagrad"],[61,1,1,"","OnnxAdagrad_1"],[61,1,1,"","OnnxAdam"],[61,1,1,"","OnnxAdam_1"],[61,1,1,"","OnnxAdd"],[61,1,1,"","OnnxAdd_1"],[61,1,1,"","OnnxAdd_13"],[61,1,1,"","OnnxAdd_14"],[61,1,1,"","OnnxAdd_6"],[61,1,1,"","OnnxAdd_7"],[61,1,1,"","OnnxAnd"],[61,1,1,"","OnnxAnd_1"],[61,1,1,"","OnnxAnd_7"],[61,1,1,"","OnnxArgMax"],[61,1,1,"","OnnxArgMax_1"],[61,1,1,"","OnnxArgMax_11"],[61,1,1,"","OnnxArgMax_12"],[61,1,1,"","OnnxArgMax_13"],[61,1,1,"","OnnxArgMin"],[61,1,1,"","OnnxArgMin_1"],[61,1,1,"","OnnxArgMin_11"],[61,1,1,"","OnnxArgMin_12"],[61,1,1,"","OnnxArgMin_13"],[61,1,1,"","OnnxArrayFeatureExtractor"],[61,1,1,"","OnnxArrayFeatureExtractor_1"],[61,1,1,"","OnnxAsin"],[61,1,1,"","OnnxAsin_7"],[61,1,1,"","OnnxAsinh"],[61,1,1,"","OnnxAsinh_9"],[61,1,1,"","OnnxAtan"],[61,1,1,"","OnnxAtan_7"],[61,1,1,"","OnnxAtanh"],[61,1,1,"","OnnxAtanh_9"],[61,1,1,"","OnnxAveragePool"],[61,1,1,"","OnnxAveragePool_1"],[61,1,1,"","OnnxAveragePool_10"],[61,1,1,"","OnnxAveragePool_11"],[61,1,1,"","OnnxAveragePool_7"],[61,1,1,"","OnnxBatchNormalization"],[61,1,1,"","OnnxBatchNormalization_1"],[61,1,1,"","OnnxBatchNormalization_14"],[61,1,1,"","OnnxBatchNormalization_15"],[61,1,1,"","OnnxBatchNormalization_6"],[61,1,1,"","OnnxBatchNormalization_7"],[61,1,1,"","OnnxBatchNormalization_9"],[61,1,1,"","OnnxBernoulli"],[61,1,1,"","OnnxBernoulli_15"],[61,1,1,"","OnnxBinarizer"],[61,1,1,
"","OnnxBinarizer_1"],[61,1,1,"","OnnxBitShift"],[61,1,1,"","OnnxBitShift_11"],[61,1,1,"","OnnxCast"],[61,1,1,"","OnnxCastLike"],[61,1,1,"","OnnxCastLike_15"],[61,1,1,"","OnnxCastMap"],[61,1,1,"","OnnxCastMap_1"],[61,1,1,"","OnnxCast_1"],[61,1,1,"","OnnxCast_13"],[61,1,1,"","OnnxCast_6"],[61,1,1,"","OnnxCast_9"],[61,1,1,"","OnnxCategoryMapper"],[61,1,1,"","OnnxCategoryMapper_1"],[61,1,1,"","OnnxCeil"],[61,1,1,"","OnnxCeil_1"],[61,1,1,"","OnnxCeil_13"],[61,1,1,"","OnnxCeil_6"],[61,1,1,"","OnnxCelu"],[61,1,1,"","OnnxCelu_12"],[61,1,1,"","OnnxClip"],[61,1,1,"","OnnxClip_1"],[61,1,1,"","OnnxClip_11"],[61,1,1,"","OnnxClip_12"],[61,1,1,"","OnnxClip_13"],[61,1,1,"","OnnxClip_6"],[61,1,1,"","OnnxCompress"],[61,1,1,"","OnnxCompress_11"],[61,1,1,"","OnnxCompress_9"],[61,1,1,"","OnnxConcat"],[61,1,1,"","OnnxConcatFromSequence"],[61,1,1,"","OnnxConcatFromSequence_11"],[61,1,1,"","OnnxConcat_1"],[61,1,1,"","OnnxConcat_11"],[61,1,1,"","OnnxConcat_13"],[61,1,1,"","OnnxConcat_4"],[61,1,1,"","OnnxConstant"],[61,1,1,"","OnnxConstantOfShape"],[61,1,1,"","OnnxConstantOfShape_9"],[61,1,1,"","OnnxConstant_1"],[61,1,1,"","OnnxConstant_11"],[61,1,1,"","OnnxConstant_12"],[61,1,1,"","OnnxConstant_13"],[61,1,1,"","OnnxConstant_9"],[61,1,1,"","OnnxConv"],[61,1,1,"","OnnxConvInteger"],[61,1,1,"","OnnxConvInteger_10"],[61,1,1,"","OnnxConvTranspose"],[61,1,1,"","OnnxConvTranspose_1"],[61,1,1,"","OnnxConvTranspose_11"],[61,1,1,"","OnnxConv_1"],[61,1,1,"","OnnxConv_11"],[61,1,1,"","OnnxCos"],[61,1,1,"","OnnxCos_7"],[61,1,1,"","OnnxCosh"],[61,1,1,"","OnnxCosh_9"],[61,1,1,"","OnnxCumSum"],[61,1,1,"","OnnxCumSum_11"],[61,1,1,"","OnnxCumSum_14"],[61,1,1,"","OnnxDepthToSpace"],[61,1,1,"","OnnxDepthToSpace_1"],[61,1,1,"","OnnxDepthToSpace_11"],[61,1,1,"","OnnxDepthToSpace_13"],[61,1,1,"","OnnxDequantizeLinear"],[61,1,1,"","OnnxDequantizeLinear_10"],[61,1,1,"","OnnxDequantizeLinear_13"],[61,1,1,"","OnnxDet"],[61,1,1,"","OnnxDet_11"],[61,1,1,"","OnnxDictVectorizer"],[61,1,1,"","OnnxDictVectorizer_1"],[61,1,1,"","OnnxDiv"],[61,1,1,"","OnnxDiv_1"],[61,1,1,"","OnnxDiv_13"],[61,1,1,"","OnnxDiv_14"],[61,1,1,"","OnnxDiv_6"],[61,1,1,"","OnnxDiv_7"],[61,1,1,"","OnnxDropout"],[61,1,1,"","OnnxDropout_1"],[61,1,1,"","OnnxDropout_10"],[61,1,1,"","OnnxDropout_12"],[61,1,1,"","OnnxDropout_13"],[61,1,1,"","OnnxDropout_6"],[61,1,1,"","OnnxDropout_7"],[61,1,1,"","OnnxDynamicQuantizeLinear"],[61,1,1,"","OnnxDynamicQuantizeLinear_11"],[61,1,1,"","OnnxEinsum"],[61,1,1,"","OnnxEinsum_12"],[61,1,1,"","OnnxElu"],[61,1,1,"","OnnxElu_1"],[61,1,1,"","OnnxElu_6"],[61,1,1,"","OnnxEqual"],[61,1,1,"","OnnxEqual_1"],[61,1,1,"","OnnxEqual_11"],[61,1,1,"","OnnxEqual_13"],[61,1,1,"","OnnxEqual_7"],[61,1,1,"","OnnxErf"],[61,1,1,"","OnnxErf_13"],[61,1,1,"","OnnxErf_9"],[61,1,1,"","OnnxExp"],[61,1,1,"","OnnxExp_1"],[61,1,1,"","OnnxExp_13"],[61,1,1,"","OnnxExp_6"],[61,1,1,"","OnnxExpand"],[61,1,1,"","OnnxExpand_13"],[61,1,1,"","OnnxExpand_8"],[61,1,1,"","OnnxEyeLike"],[61,1,1,"","OnnxEyeLike_9"],[61,1,1,"","OnnxFeatureVectorizer"],[61,1,1,"","OnnxFeatureVectorizer_1"],[61,1,1,"","OnnxFlatten"],[61,1,1,"","OnnxFlatten_1"],[61,1,1,"","OnnxFlatten_11"],[61,1,1,"","OnnxFlatten_13"],[61,1,1,"","OnnxFlatten_9"],[61,1,1,"","OnnxFloor"],[61,1,1,"","OnnxFloor_1"],[61,1,1,"","OnnxFloor_13"],[61,1,1,"","OnnxFloor_6"],[61,1,1,"","OnnxGRU"],[61,1,1,"","OnnxGRU_1"],[61,1,1,"","OnnxGRU_14"],[61,1,1,"","OnnxGRU_3"],[61,1,1,"","OnnxGRU_7"],[61,1,1,"","OnnxGather"],[61,1,1,"","OnnxGatherElements"],[61,1,1,"","OnnxGatherElements_11"],[61,1,1,"","OnnxGatherElements_13"],[61,1,1,"","OnnxG
atherND"],[61,1,1,"","OnnxGatherND_11"],[61,1,1,"","OnnxGatherND_12"],[61,1,1,"","OnnxGatherND_13"],[61,1,1,"","OnnxGather_1"],[61,1,1,"","OnnxGather_11"],[61,1,1,"","OnnxGather_13"],[61,1,1,"","OnnxGemm"],[61,1,1,"","OnnxGemm_1"],[61,1,1,"","OnnxGemm_11"],[61,1,1,"","OnnxGemm_13"],[61,1,1,"","OnnxGemm_6"],[61,1,1,"","OnnxGemm_7"],[61,1,1,"","OnnxGemm_9"],[61,1,1,"","OnnxGlobalAveragePool"],[61,1,1,"","OnnxGlobalAveragePool_1"],[61,1,1,"","OnnxGlobalLpPool"],[61,1,1,"","OnnxGlobalLpPool_1"],[61,1,1,"","OnnxGlobalLpPool_2"],[61,1,1,"","OnnxGlobalMaxPool"],[61,1,1,"","OnnxGlobalMaxPool_1"],[61,1,1,"","OnnxGradient"],[61,1,1,"","OnnxGradient_1"],[61,1,1,"","OnnxGreater"],[61,1,1,"","OnnxGreaterOrEqual"],[61,1,1,"","OnnxGreaterOrEqual_12"],[61,1,1,"","OnnxGreaterOrEqual_16"],[61,1,1,"","OnnxGreater_1"],[61,1,1,"","OnnxGreater_13"],[61,1,1,"","OnnxGreater_7"],[61,1,1,"","OnnxGreater_9"],[61,1,1,"","OnnxGridSample"],[61,1,1,"","OnnxGridSample_16"],[61,1,1,"","OnnxHardSigmoid"],[61,1,1,"","OnnxHardSigmoid_1"],[61,1,1,"","OnnxHardSigmoid_6"],[61,1,1,"","OnnxHardSwish"],[61,1,1,"","OnnxHardSwish_14"],[61,1,1,"","OnnxHardmax"],[61,1,1,"","OnnxHardmax_1"],[61,1,1,"","OnnxHardmax_11"],[61,1,1,"","OnnxHardmax_13"],[61,1,1,"","OnnxIdentity"],[61,1,1,"","OnnxIdentity_1"],[61,1,1,"","OnnxIdentity_13"],[61,1,1,"","OnnxIdentity_14"],[61,1,1,"","OnnxIdentity_16"],[61,1,1,"","OnnxIf"],[61,1,1,"","OnnxIf_1"],[61,1,1,"","OnnxIf_11"],[61,1,1,"","OnnxIf_13"],[61,1,1,"","OnnxIf_16"],[61,1,1,"","OnnxImputer"],[61,1,1,"","OnnxImputer_1"],[61,1,1,"","OnnxInstanceNormalization"],[61,1,1,"","OnnxInstanceNormalization_1"],[61,1,1,"","OnnxInstanceNormalization_6"],[61,1,1,"","OnnxIsInf"],[61,1,1,"","OnnxIsInf_10"],[61,1,1,"","OnnxIsNaN"],[61,1,1,"","OnnxIsNaN_13"],[61,1,1,"","OnnxIsNaN_9"],[61,1,1,"","OnnxLRN"],[61,1,1,"","OnnxLRN_1"],[61,1,1,"","OnnxLRN_13"],[61,1,1,"","OnnxLSTM"],[61,1,1,"","OnnxLSTM_1"],[61,1,1,"","OnnxLSTM_14"],[61,1,1,"","OnnxLSTM_7"],[61,1,1,"","OnnxLabelEncoder"],[61,1,1,"","OnnxLabelEncoder_1"],[61,1,1,"","OnnxLabelEncoder_2"],[61,1,1,"","OnnxLeakyRelu"],[61,1,1,"","OnnxLeakyRelu_1"],[61,1,1,"","OnnxLeakyRelu_16"],[61,1,1,"","OnnxLeakyRelu_6"],[61,1,1,"","OnnxLess"],[61,1,1,"","OnnxLessOrEqual"],[61,1,1,"","OnnxLessOrEqual_12"],[61,1,1,"","OnnxLessOrEqual_16"],[61,1,1,"","OnnxLess_1"],[61,1,1,"","OnnxLess_13"],[61,1,1,"","OnnxLess_7"],[61,1,1,"","OnnxLess_9"],[61,1,1,"","OnnxLinearClassifier"],[61,1,1,"","OnnxLinearClassifier_1"],[61,1,1,"","OnnxLinearRegressor"],[61,1,1,"","OnnxLinearRegressor_1"],[61,1,1,"","OnnxLog"],[61,1,1,"","OnnxLogSoftmax"],[61,1,1,"","OnnxLogSoftmax_1"],[61,1,1,"","OnnxLogSoftmax_11"],[61,1,1,"","OnnxLogSoftmax_13"],[61,1,1,"","OnnxLog_1"],[61,1,1,"","OnnxLog_13"],[61,1,1,"","OnnxLog_6"],[61,1,1,"","OnnxLoop"],[61,1,1,"","OnnxLoop_1"],[61,1,1,"","OnnxLoop_11"],[61,1,1,"","OnnxLoop_13"],[61,1,1,"","OnnxLoop_16"],[61,1,1,"","OnnxLpNormalization"],[61,1,1,"","OnnxLpNormalization_1"],[61,1,1,"","OnnxLpPool"],[61,1,1,"","OnnxLpPool_1"],[61,1,1,"","OnnxLpPool_11"],[61,1,1,"","OnnxLpPool_2"],[61,1,1,"","OnnxMatMul"],[61,1,1,"","OnnxMatMulInteger"],[61,1,1,"","OnnxMatMulInteger_10"],[61,1,1,"","OnnxMatMul_1"],[61,1,1,"","OnnxMatMul_13"],[61,1,1,"","OnnxMatMul_9"],[61,1,1,"","OnnxMax"],[61,1,1,"","OnnxMaxPool"],[61,1,1,"","OnnxMaxPool_1"],[61,1,1,"","OnnxMaxPool_10"],[61,1,1,"","OnnxMaxPool_11"],[61,1,1,"","OnnxMaxPool_12"],[61,1,1,"","OnnxMaxPool_8"],[61,1,1,"","OnnxMaxRoiPool"],[61,1,1,"","OnnxMaxRoiPool_1"],[61,1,1,"","OnnxMaxUnpool"],[61,1,1,"","OnnxMaxUnpool_11"],[61,1,1,"","
OnnxMaxUnpool_9"],[61,1,1,"","OnnxMax_1"],[61,1,1,"","OnnxMax_12"],[61,1,1,"","OnnxMax_13"],[61,1,1,"","OnnxMax_6"],[61,1,1,"","OnnxMax_8"],[61,1,1,"","OnnxMean"],[61,1,1,"","OnnxMeanVarianceNormalization"],[61,1,1,"","OnnxMeanVarianceNormalization_13"],[61,1,1,"","OnnxMeanVarianceNormalization_9"],[61,1,1,"","OnnxMean_1"],[61,1,1,"","OnnxMean_13"],[61,1,1,"","OnnxMean_6"],[61,1,1,"","OnnxMean_8"],[61,1,1,"","OnnxMin"],[61,1,1,"","OnnxMin_1"],[61,1,1,"","OnnxMin_12"],[61,1,1,"","OnnxMin_13"],[61,1,1,"","OnnxMin_6"],[61,1,1,"","OnnxMin_8"],[61,1,1,"","OnnxMod"],[61,1,1,"","OnnxMod_10"],[61,1,1,"","OnnxMod_13"],[61,1,1,"","OnnxMomentum"],[61,1,1,"","OnnxMomentum_1"],[61,1,1,"","OnnxMul"],[61,1,1,"","OnnxMul_1"],[61,1,1,"","OnnxMul_13"],[61,1,1,"","OnnxMul_14"],[61,1,1,"","OnnxMul_6"],[61,1,1,"","OnnxMul_7"],[61,1,1,"","OnnxMultinomial"],[61,1,1,"","OnnxMultinomial_7"],[61,1,1,"","OnnxNeg"],[61,1,1,"","OnnxNeg_1"],[61,1,1,"","OnnxNeg_13"],[61,1,1,"","OnnxNeg_6"],[61,1,1,"","OnnxNegativeLogLikelihoodLoss"],[61,1,1,"","OnnxNegativeLogLikelihoodLoss_12"],[61,1,1,"","OnnxNegativeLogLikelihoodLoss_13"],[61,1,1,"","OnnxNonMaxSuppression"],[61,1,1,"","OnnxNonMaxSuppression_10"],[61,1,1,"","OnnxNonMaxSuppression_11"],[61,1,1,"","OnnxNonZero"],[61,1,1,"","OnnxNonZero_13"],[61,1,1,"","OnnxNonZero_9"],[61,1,1,"","OnnxNormalizer"],[61,1,1,"","OnnxNormalizer_1"],[61,1,1,"","OnnxNot"],[61,1,1,"","OnnxNot_1"],[61,1,1,"","OnnxOneHot"],[61,1,1,"","OnnxOneHotEncoder"],[61,1,1,"","OnnxOneHotEncoder_1"],[61,1,1,"","OnnxOneHot_11"],[61,1,1,"","OnnxOneHot_9"],[61,1,1,"","OnnxOptional"],[61,1,1,"","OnnxOptionalGetElement"],[61,1,1,"","OnnxOptionalGetElement_15"],[61,1,1,"","OnnxOptionalHasElement"],[61,1,1,"","OnnxOptionalHasElement_15"],[61,1,1,"","OnnxOptional_15"],[61,1,1,"","OnnxOr"],[61,1,1,"","OnnxOr_1"],[61,1,1,"","OnnxOr_7"],[61,1,1,"","OnnxPRelu"],[61,1,1,"","OnnxPRelu_1"],[61,1,1,"","OnnxPRelu_16"],[61,1,1,"","OnnxPRelu_6"],[61,1,1,"","OnnxPRelu_7"],[61,1,1,"","OnnxPRelu_9"],[61,1,1,"","OnnxPad"],[61,1,1,"","OnnxPad_1"],[61,1,1,"","OnnxPad_11"],[61,1,1,"","OnnxPad_13"],[61,1,1,"","OnnxPad_2"],[61,1,1,"","OnnxPow"],[61,1,1,"","OnnxPow_1"],[61,1,1,"","OnnxPow_12"],[61,1,1,"","OnnxPow_13"],[61,1,1,"","OnnxPow_15"],[61,1,1,"","OnnxPow_7"],[61,1,1,"","OnnxQLinearConv"],[61,1,1,"","OnnxQLinearConv_10"],[61,1,1,"","OnnxQLinearMatMul"],[61,1,1,"","OnnxQLinearMatMul_10"],[61,1,1,"","OnnxQuantizeLinear"],[61,1,1,"","OnnxQuantizeLinear_10"],[61,1,1,"","OnnxQuantizeLinear_13"],[61,1,1,"","OnnxRNN"],[61,1,1,"","OnnxRNN_1"],[61,1,1,"","OnnxRNN_14"],[61,1,1,"","OnnxRNN_7"],[61,1,1,"","OnnxRandomNormal"],[61,1,1,"","OnnxRandomNormalLike"],[61,1,1,"","OnnxRandomNormalLike_1"],[61,1,1,"","OnnxRandomNormal_1"],[61,1,1,"","OnnxRandomUniform"],[61,1,1,"","OnnxRandomUniformLike"],[61,1,1,"","OnnxRandomUniformLike_1"],[61,1,1,"","OnnxRandomUniform_1"],[61,1,1,"","OnnxRange"],[61,1,1,"","OnnxRange_11"],[61,1,1,"","OnnxReciprocal"],[61,1,1,"","OnnxReciprocal_1"],[61,1,1,"","OnnxReciprocal_13"],[61,1,1,"","OnnxReciprocal_6"],[61,1,1,"","OnnxReduceL1"],[61,1,1,"","OnnxReduceL1_1"],[61,1,1,"","OnnxReduceL1_11"],[61,1,1,"","OnnxReduceL1_13"],[61,1,1,"","OnnxReduceL2"],[61,1,1,"","OnnxReduceL2_1"],[61,1,1,"","OnnxReduceL2_11"],[61,1,1,"","OnnxReduceL2_13"],[61,1,1,"","OnnxReduceLogSum"],[61,1,1,"","OnnxReduceLogSumExp"],[61,1,1,"","OnnxReduceLogSumExp_1"],[61,1,1,"","OnnxReduceLogSumExp_11"],[61,1,1,"","OnnxReduceLogSumExp_13"],[61,1,1,"","OnnxReduceLogSum_1"],[61,1,1,"","OnnxReduceLogSum_11"],[61,1,1,"","OnnxReduceLogSum_13"],[61,1,1,
"","OnnxReduceMax"],[61,1,1,"","OnnxReduceMax_1"],[61,1,1,"","OnnxReduceMax_11"],[61,1,1,"","OnnxReduceMax_12"],[61,1,1,"","OnnxReduceMax_13"],[61,1,1,"","OnnxReduceMean"],[61,1,1,"","OnnxReduceMean_1"],[61,1,1,"","OnnxReduceMean_11"],[61,1,1,"","OnnxReduceMean_13"],[61,1,1,"","OnnxReduceMin"],[61,1,1,"","OnnxReduceMin_1"],[61,1,1,"","OnnxReduceMin_11"],[61,1,1,"","OnnxReduceMin_12"],[61,1,1,"","OnnxReduceMin_13"],[61,1,1,"","OnnxReduceProd"],[61,1,1,"","OnnxReduceProd_1"],[61,1,1,"","OnnxReduceProd_11"],[61,1,1,"","OnnxReduceProd_13"],[61,1,1,"","OnnxReduceSum"],[61,1,1,"","OnnxReduceSumSquare"],[61,1,1,"","OnnxReduceSumSquare_1"],[61,1,1,"","OnnxReduceSumSquare_11"],[61,1,1,"","OnnxReduceSumSquare_13"],[61,1,1,"","OnnxReduceSum_1"],[61,1,1,"","OnnxReduceSum_11"],[61,1,1,"","OnnxReduceSum_13"],[61,1,1,"","OnnxRelu"],[61,1,1,"","OnnxRelu_1"],[61,1,1,"","OnnxRelu_13"],[61,1,1,"","OnnxRelu_14"],[61,1,1,"","OnnxRelu_6"],[61,1,1,"","OnnxReshape"],[61,1,1,"","OnnxReshape_1"],[61,1,1,"","OnnxReshape_13"],[61,1,1,"","OnnxReshape_14"],[61,1,1,"","OnnxReshape_5"],[61,1,1,"","OnnxResize"],[61,1,1,"","OnnxResize_10"],[61,1,1,"","OnnxResize_11"],[61,1,1,"","OnnxResize_13"],[61,1,1,"","OnnxReverseSequence"],[61,1,1,"","OnnxReverseSequence_10"],[61,1,1,"","OnnxRoiAlign"],[61,1,1,"","OnnxRoiAlign_10"],[61,1,1,"","OnnxRoiAlign_16"],[61,1,1,"","OnnxRound"],[61,1,1,"","OnnxRound_11"],[61,1,1,"","OnnxSVMClassifier"],[61,1,1,"","OnnxSVMClassifier_1"],[61,1,1,"","OnnxSVMRegressor"],[61,1,1,"","OnnxSVMRegressor_1"],[61,1,1,"","OnnxScaler"],[61,1,1,"","OnnxScaler_1"],[61,1,1,"","OnnxScan"],[61,1,1,"","OnnxScan_11"],[61,1,1,"","OnnxScan_16"],[61,1,1,"","OnnxScan_8"],[61,1,1,"","OnnxScan_9"],[61,1,1,"","OnnxScatter"],[61,1,1,"","OnnxScatterElements"],[61,1,1,"","OnnxScatterElements_11"],[61,1,1,"","OnnxScatterElements_13"],[61,1,1,"","OnnxScatterElements_16"],[61,1,1,"","OnnxScatterND"],[61,1,1,"","OnnxScatterND_11"],[61,1,1,"","OnnxScatterND_13"],[61,1,1,"","OnnxScatterND_16"],[61,1,1,"","OnnxScatter_11"],[61,1,1,"","OnnxScatter_9"],[61,1,1,"","OnnxSelu"],[61,1,1,"","OnnxSelu_1"],[61,1,1,"","OnnxSelu_6"],[61,1,1,"","OnnxSequenceAt"],[61,1,1,"","OnnxSequenceAt_11"],[61,1,1,"","OnnxSequenceConstruct"],[61,1,1,"","OnnxSequenceConstruct_11"],[61,1,1,"","OnnxSequenceEmpty"],[61,1,1,"","OnnxSequenceEmpty_11"],[61,1,1,"","OnnxSequenceErase"],[61,1,1,"","OnnxSequenceErase_11"],[61,1,1,"","OnnxSequenceInsert"],[61,1,1,"","OnnxSequenceInsert_11"],[61,1,1,"","OnnxSequenceLength"],[61,1,1,"","OnnxSequenceLength_11"],[61,1,1,"","OnnxShape"],[61,1,1,"","OnnxShape_1"],[61,1,1,"","OnnxShape_13"],[61,1,1,"","OnnxShape_15"],[61,1,1,"","OnnxShrink"],[61,1,1,"","OnnxShrink_9"],[61,1,1,"","OnnxSigmoid"],[61,1,1,"","OnnxSigmoid_1"],[61,1,1,"","OnnxSigmoid_13"],[61,1,1,"","OnnxSigmoid_6"],[61,1,1,"","OnnxSign"],[61,1,1,"","OnnxSign_13"],[61,1,1,"","OnnxSign_9"],[61,1,1,"","OnnxSin"],[61,1,1,"","OnnxSin_7"],[61,1,1,"","OnnxSinh"],[61,1,1,"","OnnxSinh_9"],[61,1,1,"","OnnxSize"],[61,1,1,"","OnnxSize_1"],[61,1,1,"","OnnxSize_13"],[61,1,1,"","OnnxSlice"],[61,1,1,"","OnnxSlice_1"],[61,1,1,"","OnnxSlice_10"],[61,1,1,"","OnnxSlice_11"],[61,1,1,"","OnnxSlice_13"],[61,1,1,"","OnnxSoftmax"],[61,1,1,"","OnnxSoftmaxCrossEntropyLoss"],[61,1,1,"","OnnxSoftmaxCrossEntropyLoss_12"],[61,1,1,"","OnnxSoftmaxCrossEntropyLoss_13"],[61,1,1,"","OnnxSoftmax_1"],[61,1,1,"","OnnxSoftmax_11"],[61,1,1,"","OnnxSoftmax_13"],[61,1,1,"","OnnxSoftplus"],[61,1,1,"","OnnxSoftplus_1"],[61,1,1,"","OnnxSoftsign"],[61,1,1,"","OnnxSoftsign_1"],[61,1,1,"","OnnxSpaceToDepth"],[6
1,1,1,"","OnnxSpaceToDepth_1"],[61,1,1,"","OnnxSpaceToDepth_13"],[61,1,1,"","OnnxSplit"],[61,1,1,"","OnnxSplitToSequence"],[61,1,1,"","OnnxSplitToSequence_11"],[61,1,1,"","OnnxSplit_1"],[61,1,1,"","OnnxSplit_11"],[61,1,1,"","OnnxSplit_13"],[61,1,1,"","OnnxSplit_2"],[61,1,1,"","OnnxSqrt"],[61,1,1,"","OnnxSqrt_1"],[61,1,1,"","OnnxSqrt_13"],[61,1,1,"","OnnxSqrt_6"],[61,1,1,"","OnnxSqueeze"],[61,1,1,"","OnnxSqueeze_1"],[61,1,1,"","OnnxSqueeze_11"],[61,1,1,"","OnnxSqueeze_13"],[61,1,1,"","OnnxStringNormalizer"],[61,1,1,"","OnnxStringNormalizer_10"],[61,1,1,"","OnnxSub"],[61,1,1,"","OnnxSub_1"],[61,1,1,"","OnnxSub_13"],[61,1,1,"","OnnxSub_14"],[61,1,1,"","OnnxSub_6"],[61,1,1,"","OnnxSub_7"],[61,1,1,"","OnnxSum"],[61,1,1,"","OnnxSum_1"],[61,1,1,"","OnnxSum_13"],[61,1,1,"","OnnxSum_6"],[61,1,1,"","OnnxSum_8"],[61,1,1,"","OnnxTan"],[61,1,1,"","OnnxTan_7"],[61,1,1,"","OnnxTanh"],[61,1,1,"","OnnxTanh_1"],[61,1,1,"","OnnxTanh_13"],[61,1,1,"","OnnxTanh_6"],[61,1,1,"","OnnxTfIdfVectorizer"],[61,1,1,"","OnnxTfIdfVectorizer_9"],[61,1,1,"","OnnxThresholdedRelu"],[61,1,1,"","OnnxThresholdedRelu_10"],[61,1,1,"","OnnxTile"],[61,1,1,"","OnnxTile_1"],[61,1,1,"","OnnxTile_13"],[61,1,1,"","OnnxTile_6"],[61,1,1,"","OnnxTopK"],[61,1,1,"","OnnxTopK_1"],[61,1,1,"","OnnxTopK_10"],[61,1,1,"","OnnxTopK_11"],[61,1,1,"","OnnxTranspose"],[61,1,1,"","OnnxTranspose_1"],[61,1,1,"","OnnxTranspose_13"],[61,1,1,"","OnnxTreeEnsembleClassifier"],[61,1,1,"","OnnxTreeEnsembleClassifier_1"],[61,1,1,"","OnnxTreeEnsembleClassifier_3"],[61,1,1,"","OnnxTreeEnsembleRegressor"],[61,1,1,"","OnnxTreeEnsembleRegressor_1"],[61,1,1,"","OnnxTreeEnsembleRegressor_3"],[61,1,1,"","OnnxTrilu"],[61,1,1,"","OnnxTrilu_14"],[61,1,1,"","OnnxUnique"],[61,1,1,"","OnnxUnique_11"],[61,1,1,"","OnnxUnsqueeze"],[61,1,1,"","OnnxUnsqueeze_1"],[61,1,1,"","OnnxUnsqueeze_11"],[61,1,1,"","OnnxUnsqueeze_13"],[61,1,1,"","OnnxUpsample"],[61,1,1,"","OnnxUpsample_10"],[61,1,1,"","OnnxUpsample_7"],[61,1,1,"","OnnxUpsample_9"],[61,1,1,"","OnnxWhere"],[61,1,1,"","OnnxWhere_16"],[61,1,1,"","OnnxWhere_9"],[61,1,1,"","OnnxXor"],[61,1,1,"","OnnxXor_1"],[61,1,1,"","OnnxXor_7"],[61,1,1,"","OnnxZipMap"],[61,1,1,"","OnnxZipMap_1"]],"skl2onnx.algebra.sklearn_ops":[[61,1,1,"","OnnxCastRegressor"],[61,1,1,"","OnnxCastTransformer"],[61,1,1,"","OnnxReplaceTransformer"],[61,1,1,"","OnnxSklearnARDRegression"],[61,1,1,"","OnnxSklearnAdaBoostClassifier"],[61,1,1,"","OnnxSklearnAdaBoostRegressor"],[61,1,1,"","OnnxSklearnBaggingClassifier"],[61,1,1,"","OnnxSklearnBaggingRegressor"],[61,1,1,"","OnnxSklearnBayesianGaussianMixture"],[61,1,1,"","OnnxSklearnBayesianRidge"],[61,1,1,"","OnnxSklearnBernoulliNB"],[61,1,1,"","OnnxSklearnBinarizer"],[61,1,1,"","OnnxSklearnCalibratedClassifierCV"],[61,1,1,"","OnnxSklearnCategoricalNB"],[61,1,1,"id1","OnnxSklearnColumnTransformer"],[61,1,1,"","OnnxSklearnComplementNB"],[61,1,1,"","OnnxSklearnCountVectorizer"],[61,1,1,"","OnnxSklearnDecisionTreeClassifier"],[61,1,1,"","OnnxSklearnDecisionTreeRegressor"],[61,1,1,"","OnnxSklearnDictVectorizer"],[61,1,1,"","OnnxSklearnElasticNet"],[61,1,1,"","OnnxSklearnElasticNetCV"],[61,1,1,"","OnnxSklearnExtraTreeClassifier"],[61,1,1,"","OnnxSklearnExtraTreeRegressor"],[61,1,1,"","OnnxSklearnExtraTreesClassifier"],[61,1,1,"","OnnxSklearnExtraTreesRegressor"],[61,1,1,"id2","OnnxSklearnFeatureUnion"],[61,1,1,"","OnnxSklearnFunctionTransformer"],[61,1,1,"","OnnxSklearnGaussianMixture"],[61,1,1,"","OnnxSklearnGaussianNB"],[61,1,1,"","OnnxSklearnGaussianProcessClassifier"],[61,1,1,"","OnnxSklearnGaussianProcessRegressor"],[61,1,1
,"","OnnxSklearnGaussianRandomProjection"],[61,1,1,"","OnnxSklearnGenericUnivariateSelect"],[61,1,1,"","OnnxSklearnGradientBoostingClassifier"],[61,1,1,"","OnnxSklearnGradientBoostingRegressor"],[61,1,1,"","OnnxSklearnGridSearchCV"],[61,1,1,"","OnnxSklearnHistGradientBoostingClassifier"],[61,1,1,"","OnnxSklearnHistGradientBoostingRegressor"],[61,1,1,"","OnnxSklearnHuberRegressor"],[61,1,1,"","OnnxSklearnIncrementalPCA"],[61,1,1,"","OnnxSklearnIsolationForest"],[61,1,1,"","OnnxSklearnKBinsDiscretizer"],[61,1,1,"","OnnxSklearnKMeans"],[61,1,1,"","OnnxSklearnKNNImputer"],[61,1,1,"","OnnxSklearnKNeighborsClassifier"],[61,1,1,"","OnnxSklearnKNeighborsRegressor"],[61,1,1,"","OnnxSklearnKNeighborsTransformer"],[61,1,1,"","OnnxSklearnKernelCenterer"],[61,1,1,"","OnnxSklearnKernelPCA"],[61,1,1,"","OnnxSklearnLabelBinarizer"],[61,1,1,"","OnnxSklearnLabelEncoder"],[61,1,1,"","OnnxSklearnLars"],[61,1,1,"","OnnxSklearnLarsCV"],[61,1,1,"","OnnxSklearnLasso"],[61,1,1,"","OnnxSklearnLassoCV"],[61,1,1,"","OnnxSklearnLassoLars"],[61,1,1,"","OnnxSklearnLassoLarsCV"],[61,1,1,"","OnnxSklearnLassoLarsIC"],[61,1,1,"","OnnxSklearnLinearDiscriminantAnalysis"],[61,1,1,"","OnnxSklearnLinearRegression"],[61,1,1,"","OnnxSklearnLinearSVC"],[61,1,1,"","OnnxSklearnLinearSVR"],[61,1,1,"","OnnxSklearnLocalOutlierFactor"],[61,1,1,"","OnnxSklearnLogisticRegression"],[61,1,1,"","OnnxSklearnLogisticRegressionCV"],[61,1,1,"","OnnxSklearnMLPClassifier"],[61,1,1,"","OnnxSklearnMLPRegressor"],[61,1,1,"","OnnxSklearnMaxAbsScaler"],[61,1,1,"","OnnxSklearnMinMaxScaler"],[61,1,1,"","OnnxSklearnMiniBatchKMeans"],[61,1,1,"","OnnxSklearnMultiOutputClassifier"],[61,1,1,"","OnnxSklearnMultiOutputRegressor"],[61,1,1,"","OnnxSklearnMultiTaskElasticNet"],[61,1,1,"","OnnxSklearnMultiTaskElasticNetCV"],[61,1,1,"","OnnxSklearnMultiTaskLasso"],[61,1,1,"","OnnxSklearnMultiTaskLassoCV"],[61,1,1,"","OnnxSklearnMultinomialNB"],[61,1,1,"","OnnxSklearnNearestNeighbors"],[61,1,1,"","OnnxSklearnNeighborhoodComponentsAnalysis"],[61,1,1,"","OnnxSklearnNormalizer"],[61,1,1,"","OnnxSklearnNuSVC"],[61,1,1,"","OnnxSklearnNuSVR"],[61,1,1,"","OnnxSklearnOneClassSVM"],[61,1,1,"","OnnxSklearnOneHotEncoder"],[61,1,1,"","OnnxSklearnOneVsRestClassifier"],[61,1,1,"","OnnxSklearnOrdinalEncoder"],[61,1,1,"","OnnxSklearnOrthogonalMatchingPursuit"],[61,1,1,"","OnnxSklearnOrthogonalMatchingPursuitCV"],[61,1,1,"","OnnxSklearnPCA"],[61,1,1,"","OnnxSklearnPLSRegression"],[61,1,1,"","OnnxSklearnPassiveAggressiveClassifier"],[61,1,1,"","OnnxSklearnPassiveAggressiveRegressor"],[61,1,1,"","OnnxSklearnPerceptron"],[61,1,1,"id0","OnnxSklearnPipeline"],[61,1,1,"","OnnxSklearnPoissonRegressor"],[61,1,1,"","OnnxSklearnPolynomialFeatures"],[61,1,1,"","OnnxSklearnPowerTransformer"],[61,1,1,"","OnnxSklearnQuantileRegressor"],[61,1,1,"","OnnxSklearnRANSACRegressor"],[61,1,1,"","OnnxSklearnRFE"],[61,1,1,"","OnnxSklearnRFECV"],[61,1,1,"","OnnxSklearnRadiusNeighborsClassifier"],[61,1,1,"","OnnxSklearnRadiusNeighborsRegressor"],[61,1,1,"","OnnxSklearnRandomForestClassifier"],[61,1,1,"","OnnxSklearnRandomForestRegressor"],[61,1,1,"","OnnxSklearnRandomTreesEmbedding"],[61,1,1,"","OnnxSklearnRidge"],[61,1,1,"","OnnxSklearnRidgeCV"],[61,1,1,"","OnnxSklearnRidgeClassifier"],[61,1,1,"","OnnxSklearnRidgeClassifierCV"],[61,1,1,"","OnnxSklearnRobustScaler"],[61,1,1,"","OnnxSklearnSGDClassifier"],[61,1,1,"","OnnxSklearnSGDRegressor"],[61,1,1,"","OnnxSklearnSVC"],[61,1,1,"","OnnxSklearnSVR"],[61,1,1,"","OnnxSklearnSelectFdr"],[61,1,1,"","OnnxSklearnSelectFpr"],[61,1,1,"","OnnxSklearnSelectFromModel"],[61
,1,1,"","OnnxSklearnSelectFwe"],[61,1,1,"","OnnxSklearnSelectKBest"],[61,1,1,"","OnnxSklearnSelectPercentile"],[61,1,1,"","OnnxSklearnSimpleImputer"],[61,1,1,"","OnnxSklearnStackingClassifier"],[61,1,1,"","OnnxSklearnStackingRegressor"],[61,1,1,"","OnnxSklearnStandardScaler"],[61,1,1,"","OnnxSklearnTfidfTransformer"],[61,1,1,"","OnnxSklearnTfidfVectorizer"],[61,1,1,"","OnnxSklearnTheilSenRegressor"],[61,1,1,"","OnnxSklearnTruncatedSVD"],[61,1,1,"","OnnxSklearnTweedieRegressor"],[61,1,1,"","OnnxSklearnVarianceThreshold"],[61,1,1,"","OnnxSklearnVotingClassifier"],[61,1,1,"","OnnxSklearnVotingRegressor"]],"skl2onnx.algebra.sklearn_ops.OnnxSklearnColumnTransformer":[[61,2,1,"","onnx_converter"],[61,2,1,"","onnx_parser"],[61,2,1,"","onnx_shape_calculator"],[61,2,1,"","to_onnx"],[61,2,1,"","to_onnx_operator"]],"skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureUnion":[[61,2,1,"","onnx_converter"],[61,2,1,"","onnx_parser"],[61,2,1,"","onnx_shape_calculator"],[61,2,1,"","to_onnx"],[61,2,1,"","to_onnx_operator"]],"skl2onnx.algebra.sklearn_ops.OnnxSklearnPipeline":[[61,2,1,"","onnx_converter"],[61,2,1,"","onnx_parser"],[61,2,1,"","onnx_shape_calculator"],[61,2,1,"","to_onnx"],[61,2,1,"","to_onnx_operator"]],"skl2onnx.common._container":[[0,1,1,"","ModelComponentContainer"],[0,1,1,"","SklearnModelContainerNode"]],"skl2onnx.common._container.ModelComponentContainer":[[0,2,1,"","add_initializer"],[0,2,1,"","add_input"],[0,2,1,"","add_node"],[0,2,1,"","add_output"]],"skl2onnx.common._container.SklearnModelContainerNode":[[0,3,1,"","input_names"],[0,3,1,"","output_names"]],"skl2onnx.common._topology":[[0,1,1,"","Operator"],[0,1,1,"","Scope"],[0,1,1,"","Topology"],[0,1,1,"","Variable"],[0,0,1,"","convert_topology"]],"skl2onnx.common._topology.Scope":[[0,2,1,"","get_unique_operator_name"],[0,2,1,"","get_unique_variable_name"]],"skl2onnx.common.utils":[[0,0,1,"","check_input_and_output_numbers"],[0,0,1,"","check_input_and_output_types"]],"skl2onnx.helpers.onnx_helper":[[0,0,1,"","enumerate_model_node_outputs"],[0,0,1,"","load_onnx_model"],[0,0,1,"","save_onnx_model"],[0,0,1,"","select_model_inputs_outputs"]],"skl2onnx.operator_converters.text_vectoriser":[[59,0,1,"","convert_sklearn_text_vectorizer"]],skl2onnx:[[0,0,1,"","convert_sklearn"],[0,0,1,"","get_latest_tested_opset_version"],[0,0,1,"","supported_converters"],[0,0,1,"","to_onnx"],[0,0,1,"","update_registered_converter"],[0,0,1,"","update_registered_parser"]]},objnames:{"0":["py","function","Python function"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","property","Python 
property"]},objtypes:{"0":"py:function","1":"py:class","2":"py:method","3":"py:property"},terms:{"0":[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,53,54,55,56,57,58,59,60,61,65],"00":[3,18,27,30,43,47,52,55],"000":[51,55],"000000":31,"000001":[43,48],"000002":30,"000003":30,"000005":[3,52],"000006":52,"000010":[30,52],"000011":[3,48],"000012":3,"000014":30,"000015":48,"000017":48,"000018":[30,48],"000021":3,"000029":48,"000040":3,"000081":[3,30],"0001":61,"0001083172664970196":43,"0002951417065241126":[45,46,48,49],"000304":3,"0003158352661955726":47,"000397":30,"000448":30,"000455":30,"000531":30,"0005483764980302357":[48,49],"000592":30,"000598":30,"0006638e":14,"00066408e":14,"000722":3,"000922":30,"001":61,"001172":30,"0015032000000019252":11,"0015074000000012688":11,"001538400000001161":11,"0015596000000002164":11,"0015722999999994158":11,"00159219999999749":11,"0016032e":14,"00160347e":14,"0016416000000027964":11,"0016430000000013933":11,"0016695000000019888":11,"0017163999999993962":11,"00176908":16,"0017708":16,"0018040900000016792":35,"00184764":16,"00192241":16,"0020":61,"00248988":16,"00264343":37,"00273340000000033":11,"002733799999997899":11,"00273870000000187":11,"0027413000000002796":11,"002797199999999833":11,"002814559999993094":35,"002864799999997558":11,"0029658999999995217":11,"003":55,"0030254999999996812":11,"0030255":37,"0031782499999962965":35,"0034106999999998777":11,"003925":30,"0039437000000006606":11,"00394489999999692":11,"003952999999999207":11,"003956299999998691":11,"003962199999996585":11,"0039623000000013064":11,"004037":30,"0040483":37,"00404832":37,"00416552":37,"004245":30,"004249":30,"004302":30,"004371":30,"004433":30,"004482":30,"004501":30,"004546":30,"004711300000000307":11,"004821050000001037":35,"0058392000000004884":11,"005944400000000627":11,"005951800000001839":11,"006":[53,55],"00633966":37,"00650185":33,"006523":37,"0065230131149292":37,"006559":30,"00678098":37,"0068076":34,"0068088":34,"007055":30,"007082":30,"00711049":33,"00723617":37,"00737737":33,"0073865982703864574":11,"0073866":11,"00782984":33,"00792406e":47,"00827491":33,"00829443":37,"008331673219799995":11,"00845791":37,"008642":37,"008658":30,"009":[25,27],"00911009e":18,"00911047e":18,"009464402683079243":17,"00955989":37,"00982060e":18,"00982118e":18,"009999999776482582":61,"00it":52,"01":[4,11,13,14,18,27,30,35,38,47,55,60,61],"010468482971191":32,"01139555":37,"0116":20,"0124999999993066e":3,"01281316":37,"01347652e":18,"01349142e":18,"0145789":37,"0146841":21,"0146841004844296":21,"01472526602447033":35,"01472527":35,"01513279":37,"01529924":41,"0167562":[45,46,48,49],"017042":30,"017088":30,"01757041429218e":[48,49],"017997":30,"018039":30,"01808184":37,"01835155e":18,"01835168e":18,"01887599192559719":17,"01900435":52,"019004351971607":52,"0190045833587646":37,"0190046":37,"019102199003100395":17,"01946042":37,"019845":30,"02":[3,14,18,27,30,35,43,47,55],"02025":61,"021003e":43,"02104169":8,"021041708067059517":8,"021282":30,"02188403":37,"022738933563232":32,"02426777593791485":17,"02453122":45,"02615768":37,"02617":40,"02712265":37,"02737038":37,"02762963":37,"02913":40,"02915135441384":36,"029325326904654503":32,"03":[11,13,18,27,35,38,47,55],"0300019":37,"03125":16,"03167":61,"032402":37,"03242342":37,"03283946":37,"03326801":37,"03478567":12,"03497888":37,"035177":37,"035177230834961":37,"035580158233643":32,"035947":31,"03594739":31,"03632069":45,"03664":40,"03678461":37,
"03710592e":18,"03710651e":18,"0385284423828125":32,"03853641":37,"03930473e":18,"03930533e":18,"03950266e":18,"03950286e":18,"04":[13,14,18,30,35,38,43],"04034399":[45,46,48,49],"041125":3,"04248866":37,"0430717":37,"04332870e":18,"04333529e":18,"04525625":37,"045351505279541016":37,"04535151":37,"046":[20,27],"046217918395996":32,"04646993":37,"046576181972796e":[45,46,48,49],"04697429":37,"04698204":37,"04740147":37,"047564506530762":32,"048057686537504196":8,"04810077":37,"048381537199020386":2,"049410343170166":32,"05":[3,6,8,14,15,29,30,35,36,43,55,61],"05036852":37,"050557":37,"0505571365356445":37,"05055769":37,"0506999492645264":61,"0507":61,"0507009873554804934193349852946":61,"0507010221481323":61,"05070102214813232421875":61,"05342943":37,"054587215":21,"05458721536638154":21,"05733333":45,"05811969e":[4,18],"05811991e":[4,18],"0581199e":18,"06":[3,4,15,18,27,30,37,43,48,49,61],"060":[46,55],"06034971":37,"060494836419820786":35,"06049484":35,"06146":40,"062030207365751266":35,"06203021":35,"062292098999023":32,"063368797302246":8,"06462905":37,"064851498577167e":43,"0649694204330444":17,"06536484":37,"06765":40,"06870":61,"069":[15,27],"0692":40,"06it":52,"07":[4,15,18,29,30,35,37,43,45,46,47,48,49,61],"07041626e":18,"07041881e":18,"071470737457275":32,"0727878":[45,46,48,49],"072888374328613":32,"0757758617401123":32,"0773670673370361":8,"07814487814903259":17,"07990900e":13,"0799096e":13,"08":[4,11,16,18,27,36,43,52,61],"080":[10,27],"08022":61,"08026457e":18,"08026490e":18,"08067281":37,"08201734":37,"084038496017456":17,"08509461e":18,"08509481e":18,"086":[32,55],"08628479":37,"08705397":37,"08776655":37,"089686393737793":32,"09":[15,43,52,61],"09001251":37,"09132468":37,"09168785":37,"091d371":[7,17],"092":[8,27],"09231844":37,"092388":33,"09288714":37,"09293693":41,"094013214111328":32,"09414224326610565":21,"094844":30,"09485311e":18,"09485388e":18,"095537650763756e":37,"0958103":37,"095824":33,"09585991":16,"09600093":16,"0965562043784303e":36,"09655735":37,"097119":30,"09737":40,"098":[38,55],"098145":44,"0984854e":35,"09865843":37,"09881086":37,"09907253":37,"09950908":[45,46,48,49],"099685":30,"099999904632568":37,"0f":[40,61],"0th":61,"0x0000021dd4c37df0":39,"0x0000021de87ffa30":36,"1":[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,52,53,54,57,59,60,61,65],"10":[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,30,32,35,36,37,40,43,44,45,48,50,53,54,57,60,61],"100":[3,11,30,32,35,36,43,44,48,52,53,61],"1000":[3,4,18,30,35,40,43,48,61],"10000":[3,4,5,6,30,34,36,61],"100000":40,"101":32,"10101":33,"102":32,"10210604":8,"10210609436035156":8,"1024":61,"103":32,"1033":17,"104":[32,41,55],"10449100e":18,"10449110e":18,"10464272e":47,"104850":30,"105":32,"10512":40,"10521298":37,"10552978515625":37,"10553":37,"106":[19,27,32],"10617606e":18,"10619572e":18,"106628442140432":21,"1066285":21,"106786":30,"107":[32,54],"1071343":33,"107921":30,"108":32,"10849001":37,"10890":30,"109":32,"10917443":21,"10917443073276308":21,"10948393":33,"10967488":37,"10968":40,"10999425":37,"10999998":39,"10it":52,"11":[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,30,32,33,35,36,38,39,43,57,61],"110":[30,32,35],"11000":30,"11066373":17,"111":32,"1115":[25,44,52],"112":[32,35],"112022399902344":32,"1121":17,"112297058105469":32,"112561e":43,"112663269042969":32,"1127345561981201":21,"113":32,"1132966":37,"114":[32,35],"115":32,"11564035713672638":8,"11564038":8,"116":32,"1
1613683":37,"1163711":61,"117":32,"11774537":37,"118":32,"1188793182373047":32,"119":[12,27,32,40],"11952":40,"11it":52,"12":[3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,21,22,23,24,25,26,27,29,32,34,35,37,38,40,42,43,44,52,61],"120":[32,43],"12032884e":18,"12032919e":18,"121":32,"121067047119141":32,"121484279632568":32,"12188023":37,"122":32,"12224641":37,"12237":26,"1224":[25,44],"12263762":7,"12263762950897217":7,"12263763":7,"123":[32,61],"12321156":37,"12353003":37,"124":32,"1240495":37,"12424":40,"125":[32,40,61],"12581898":37,"12585926e":47,"126":32,"1260029e":35,"1266":22,"127":[32,61],"128":[32,40,61],"129":32,"13":[4,18,22,30,32,43,61],"130":32,"13002382":37,"1303":21,"13063404":[45,46,48,49],"131":32,"131063461303711":32,"1315000000067243e":3,"13197924":37,"13197947932162468":52,"13197948":52,"132":32,"13255882e":18,"13256034e":18,"13264":5,"1326988e":35,"13342835":29,"13343":29,"133726596832275":32,"13396745920181274":11,"13396746":11,"13401784":37,"13540568":[45,46,48,49],"136129856109619":32,"137":[13,27],"13765850e":18,"13765854e":18,"13792053":33,"13822884":37,"13826195":37,"138424e":30,"13913":40,"13942432403564453":32,"13960505e":18,"13960509e":18,"13996294":37,"13it":52,"14":[4,16,17,18,22,30,31,32,35,36,40,43,49,53,61],"140":43,"1405":32,"140735626220703":8,"141592":61,"1415926459":61,"143":44,"1430167":37,"14301691":52,"1430169111851105":52,"14311522":37,"14384613e":18,"14384818e":18,"14390673":37,"14462455":41,"14490402":37,"145":40,"145303":37,"1456419e":38,"14564701e":18,"14564866e":18,"14580827":37,"146186828613281":32,"14683513":37,"14800124":17,"15":[18,22,32,40,43,52,56,61],"150":43,"15000":61,"1502":61,"1506":61,"15284600853919983":7,"15284601":7,"15284604":7,"15362482":21,"15362482301947428":21,"15495988726615906":32,"15593736e":47,"156":40,"15669105e":18,"15669155e":18,"157":40,"158231735229492":32,"1585729":37,"158615":37,"15861505":37,"15926":61,"16":[18,22,32,37,43,52,61],"160":43,"1607":61,"16142101":37,"16144533":16,"1624706":34,"16247152":34,"1657182000000006":4,"16604995":37,"16634221":33,"16636059":37,"1674340963363647":37,"1674341":37,"168":40,"169000":40,"16935596e":18,"16935601e":18,"1697":57,"16970102e":18,"16970128e":18,"17":[16,22,25,32,43,44,52,61],"170":43,"1703":61,"1707":[6,22],"172":29,"17402084":37,"175":29,"17526444":37,"17526973":37,"17581303":25,"17581303417682648":25,"17688445":16,"17771609999999782":4,"17819276452064514":17,"178422927856445":32,"17926627":37,"18":[22,32,43,52],"180142402648926":32,"1814093589782715":32,"18177342e":18,"18177631e":18,"18264007":37,"18287527":37,"18287552893161774":17,"18298979e":47,"1830756":37,"18341242":37,"18354762":33,"18358725":37,"184":[54,55],"18415856":44,"1841585636138916":44,"18642520904541":32,"186742782592773":32,"187":61,"187482833862305":32,"18764767":37,"188":[48,55],"1882844865322113":21,"18852343":37,"19":[4,16,18,22,32,43,52],"19031307":37,"19061026":37,"19085159021082634":21,"1908516":21,"19234391":37,"19397247e":47,"194":40,"19439172744751":32,"19454679":37,"19577242":37,"197":40,"19718995":37,"19728662":33,"1979":25,"198":40,"19933333":45,"1d":61,"1e":[45,50,60,61],"1e5":2,"1e8":61,"1st":61,"2":[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,47,48,49,50,52,53,54,57,59,60,61],"20":[3,5,6,7,10,12,13,14,16,17,22,24,25,26,30,32,43,52,61],"200":[43,53,61],"20000000298023224":[37,61],"2007":6,"201":40,"2017":[7,17,56],"20249173045158386":32,"20261743":37,"203":40,"203311920166016":32,"20355969":37,"2049":24,"205":40,"2052
26":37,"207":40,"2071":12,"20748818":37,"208":40,"20887226e":14,"2088726e":14,"209":40,"20983625":37,"20986655":37,"21":[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,32,35,43,57],"210075":40,"21113562":37,"21148368":37,"21154793":37,"212":40,"212071e":30,"21238177e":18,"21238220e":18,"2129":54,"21295824":37,"21295893":37,"2129589319229126":37,"213":40,"21314716e":[4,18],"21315361e":[4,18],"2131536e":18,"2138161":[45,46,48,49],"214":40,"2147483647":61,"2149":59,"21588576e":18,"21588602e":18,"216":[11,27,40],"21618828":37,"216431617736816":32,"217":40,"21796290e":18,"21796322e":18,"218":40,"21999998":39,"22":[22,30,32,39,52],"220":40,"22000003":39,"22040251":37,"220446049250313e":61,"221":40,"22119207e":18,"22119273e":18,"222":40,"22297775":37,"223":40,"22329554":37,"2234":40,"22346979e":18,"22346997e":18,"22382017":45,"224":40,"22441299":37,"22498065e":16,"225":40,"22513888e":[4,18],"22514706e":[4,18],"2251471e":18,"226":40,"22639418e":18,"22639567e":18,"226452350616455":32,"22683089":37,"2272":54,"22822113":37,"22844934463501":32,"22860026":37,"23":[22,30,32,43],"230":40,"231":40,"231082558631897":17,"2311158180236816":17,"232":40,"232491493225098":32,"233448028564453":32,"2336976":37,"234":40,"2349296":33,"235":40,"236":40,"23688316":42,"23688316345214844":42,"23708422":16,"23721537e":18,"23721600e":18,"23722566664218903":42,"23722567":42,"23742547":37,"2374443":37,"238":40,"23816":44,"23871285e":[4,18],"23872068e":[4,18],"2387207e":18,"239":40,"239633560180664":32,"24":[26,32],"24048182e":18,"24048193e":18,"240567207336426":32,"2420356":16,"242347478866577":32,"24274589":37,"243":40,"24376":40,"244":[9,27,40],"2442":59,"24683":40,"247":40,"2489329":37,"24937188e":18,"24937190e":18,"24946737":[45,46,48,49],"25":[16,30,32,43,61],"251":40,"25267619":37,"253":40,"25323":40,"253325462341309":32,"25361098":37,"254":40,"25465202331543":32,"2548935":37,"255":[40,61],"2553710341453552":24,"25537106":24,"25547981262207":32,"2558":[24,25],"25638099":37,"258":40,"2595":10,"26":[32,39,40,43],"26000002":39,"2602147e":35,"261103630065918":32,"261491775512695":32,"26243707e":47,"2627763e":35,"26280227303504944":17,"26327469":37,"2637671":37,"26421417":37,"2643":13,"2646049999993636e":3,"26468200000000053":4,"26483383774757385":35,"26483384":35,"265":40,"2652202546596527":17,"26547013":37,"266609999999346e":3,"267":40,"26754513e":47,"26819164":37,"2686633e":38,"26945032":37,"26it":52,"27":[32,43],"270":[39,55],"2707398":33,"2708714":37,"27264":40,"27575645":37,"27612122893333435":21,"27618123":37,"27646":40,"276545524597168":32,"27727048":37,"28":[17,32,61],"28416057":37,"285":26,"28591141":37,"28635925":26,"287":40,"29":[17,30,32,39],"29000002":39,"29000038":39,"2903":14,"29090086e":18,"29090165e":18,"29285214e":18,"29285216e":18,"29304411":37,"29353551e":18,"29353619e":18,"293953":40,"29410146e":18,"29410172e":18,"2947186":37,"2965405000000061":34,"2c_2":40,"2d":61,"2nd":61,"2o":61,"2x":61,"3":[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,30,31,32,33,35,37,38,39,40,41,42,43,44,45,47,52,54,57,60,61],"30":[32,33,61],"300":[5,6,7,10,12,13,14,17,22,24,25,26,43,53,54,61],"300246238708496":32,"30075052":37,"30081844329834":32,"3011":40,"3028355589916885e":43,"30295342":37,"3049873":37,"3049873113632202":37,"30563080e":16,"306":40,"30607344e":[4,18],"3060734e":18,"30607528e":[4,18],"30724964":21,"30724964603894855":21,"308":59,"309634208679199":32,"309723377227783":32,"30977167":37,"31":[16,32,52,61],"3108374e":13,"31083751e":13,"311147212982178":32,"3111534":37,"3112645
3":24,"3112645745277405":24,"313451e":43,"314":61,"3140236e":35,"31443589e":18,"31443739e":18,"31533698e":18,"31533718e":18,"3154442":37,"31653995":37,"31817617e":18,"31819277e":18,"31830999":37,"31907679":37,"32":[16,30,32,43,55,61],"32016109e":18,"32016134e":18,"32128686":37,"322":6,"323045":52,"32311723":37,"32392645e":18,"32392659e":18,"32524378e":18,"32524411e":18,"32559893e":[4,18],"32561811e":[4,18],"3256181e":18,"32571939":37,"327":59,"32706":40,"3282425":37,"329":[31,43,55],"33":[17,25,32,43,55],"3307213e":13,"33073272e":13,"3316732e":11,"3321":54,"33264308":37,"334753036499023":32,"33482453":37,"33653691":37,"337316e":43,"33753092":37,"339":40,"339177131652832":32,"339502":40,"3396746e":11,"34":32,"340065":40,"3402264":37,"340226411819458":37,"34023352":37,"34081019999999285":34,"341":[14,27],"341296195983887":32,"342":40,"34329146":37,"345580577850342":32,"34561415":37,"34561639999999727":5,"345777e":43,"34749":40,"3480018":37,"348924160003662":32,"3499275":[45,46,48,49],"35":[32,39,43,44,61],"3503222":37,"3507236":37,"351550102233887":32,"352":38,"35419":40,"35554346":44,"35554346442222595":44,"35576868057251":32,"357":40,"35721732":37,"35747592":37,"35790036":[45,46,48,49],"35869288e":18,"35869345e":18,"359419822692871":32,"35it":3,"36":[32,39,52,61],"3600003":39,"3618706":37,"36345425":37,"363498":37,"36351":40,"366023":37,"366023063659668":37,"366146087646484":32,"36736664":37,"36769632":45,"3697":57,"3697155":37,"36e":36,"36it":52,"37":[30,32,40],"37049946188926697":17,"371":[45,55],"37130203e":18,"37130213e":18,"3714":40,"37186092":37,"37258261":37,"3727778000000015":4,"3727787":33,"373":[40,55],"37364638e":18,"37364649e":18,"37441392":[45,46,48,49],"37471011":37,"375":[49,55],"3750882":37,"375840":3,"3761973":37,"376197338104248":37,"37699732":37,"37707573":37,"37730860710144043":25,"37730864":25,"37859321e":18,"37859393e":18,"379":[2,27],"37943811":37,"379992":3,"38":[11,32,35,61],"380":[50,55],"38023":44,"382072448730469":32,"38255534":37,"383":[35,55],"384026050567627":32,"38457":40,"385434e":43,"38617":40,"3865983e":11,"3883555e":35,"38836669921875":32,"3886702e":14,"38867098e":14,"389":23,"38933168":37,"39":32,"39024415e":[4,18],"3902442e":18,"39025169e":[4,18],"39030059e":18,"39030147e":18,"39046200e":18,"39046240e":18,"39065126389346":6,"3914518356323242":32,"3925661e":14,"39256693e":14,"393228530883789":32,"3937975e":35,"3939":26,"394687012529754":36,"394687012529765":36,"395773718546492e":3,"39710821e":16,"39731191":37,"39763754":37,"39830644":37,"39898792":37,"3c59201b940f410fa29dc71ea9d5767d":20,"3rd":61,"4":[2,3,4,8,9,10,11,12,13,14,15,16,18,19,21,22,23,30,31,32,34,35,37,39,40,43,45,47,50,52,54,56,58,60,61],"40":[5,6,7,10,12,13,14,17,22,24,25,26,32,39,43],"400":[43,53],"40011111":37,"401567459106445":32,"40159237e":18,"40159293e":18,"40214871":37,"40215457":37,"4028234663852886e":61,"40399802e":18,"40399894e":18,"40470337e":13,"4047034e":13,"405712":31,"4057125":31,"407014846801758":37,"407015":37,"40973541":37,"4098304":33,"41":[32,52],"41410101e":13,"4141011e":13,"41481694":37,"415436267852783":32,"4167706966400146":32,"4171":6,"41836425":37,"4198070168495178":32,"42":[32,43,44,53],"4203097999999983":5,"42040539e":18,"42040920e":18,"4207266":37,"4211851e":13,"42118544e":13,"42174651":37,"42215998":37,"4225693":37,"42313566":37,"4242255687713623":32,"42422866821289":17,"4288432":37,"4289":40,"4290924":37,"42946":16,"42951685190200806":32,"42988910e":16,"43":[32,39],"43000007":39,"43000025":39,"4305":5,"43194777":37,"4319816":16,"4321798872486577":43,"43237329"
:52,"4323732931220851":52,"43272989":37,"43336441":24,"4333644211292267":24,"433489799499512":32,"434":[7,27],"4343386":37,"43501505":12,"43501514":12,"43563509":37,"43619989":37,"43634558":37,"4372":17,"43805":40,"4383972883224487":32,"43841018":[45,46,48,49],"44":32,"4401196837425232":26,"44057865":37,"44150237":37,"44311693":37,"444":[42,55],"44451588":37,"4445734":37,"44537946e":18,"44538953e":18,"446":40,"446793799521402e":15,"44687837":25,"4468783736228943":25,"44867":40,"44949426e":47,"4496996":37,"45":[30,32],"45039011e":18,"45039046e":18,"4509795":37,"4522":5,"45283591":37,"453676e":30,"4542873":33,"45456872":37,"4585":40,"45885217":41,"4592312e":13,"45928736e":13,"45930032":37,"45982568":37,"46":[32,40],"460298":44,"46029800176620483":44,"46207":40,"46246781":37,"464239597320557":32,"465697705745697":17,"46773":40,"46794":40,"46831572e":18,"46831581e":18,"46856597e":14,"4685675e":14,"4686027":33,"47":32,"47017911":37,"47096547":37,"47109224":37,"47186923e":18,"47187038e":18,"472864151000977":32,"47633229":37,"47648491e":[4,18],"47648956e":[4,18],"4764896e":18,"477215766906738":32,"479":36,"47945627":37,"47962495":37,"48":32,"48150581":37,"4824":61,"48244872e":14,"4824489e":14,"48285941":37,"48418961":37,"484377840758725e":15,"48549134e":[4,18],"48550355e":[4,18],"4855036e":18,"48627452e":[4,18],"48627885e":[4,18],"4862788e":18,"48873673":37,"48958":40,"49":32,"49122":44,"49297488e":[4,18],"4929749e":18,"49298735e":[4,18],"4934":[13,14],"49518379":37,"4956208":40,"4969846103737152e":29,"497":[24,27],"498035430908203":32,"49810802":37,"49874494":37,"499":40,"499288818100467e":15,"49931367":37,"4e":12,"5":[0,2,3,4,5,6,7,8,10,12,13,14,15,16,17,18,19,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,52,54,57,61],"50":[16,26,30,32,39,43,52,61],"500":[8,11,35,43,53,61],"50054672":37,"500636e":43,"50081008":37,"50244932":37,"504":40,"50401564":37,"5042473":33,"50497262":37,"50499":40,"50528631e":[4,18],"5052863e":18,"50528981e":[4,18],"50691":40,"50727105e":18,"50727140e":18,"50806":40,"50958":29,"50958206":29,"51":[25,32],"511":[47,55],"51357":40,"5140781740836594e":29,"51414747":37,"514410972595215":32,"514770030975342":32,"51502740e":14,"5150274e":14,"517":[5,27],"51707":40,"51850037":37,"52":32,"520071029663086":32,"520229339599609":32,"52111756":[45,46,48,49],"52205472":37,"52213719":45,"52274684":37,"52309257":37,"52313867":37,"52388423e":14,"5239261e":14,"52511757":37,"52523323":37,"52584857":41,"52589117":42,"5258911848068237":42,"527759552001953":32,"528":[21,27,35],"52936423":37,"52940059e":18,"52940090e":18,"5295":40,"53":32,"53103908":37,"5328559279441833":32,"53658932":37,"53979211":37,"54":32,"540437698364258":32,"541445255279541":32,"5425593852996826":32,"54267314e":18,"54267330e":18,"544509":3,"545531":31,"54566121e":18,"54566122e":18,"54698303":37,"547":[26,27],"547382e":43,"54896332":37,"5499999523162842":19,"54it":52,"55":[32,39,40,54],"55013293":37,"55018391":37,"552550792694092":32,"5525559000000015":4,"55292557":37,"5544168949127197":32,"55559903":37,"556101":40,"55619573":37,"5563672780990601":21,"55758166e":47,"55765799e":[4,18],"55766493e":[4,18],"5576649e":18,"5598803162574768":26,"56":[32,39],"56006248":37,"56012595403854e":47,"561":[18,27],"56194325":[45,46,48,49],"56227":40,"56280964":37,"5639830e":35,"56451812e":16,"564691543579102":32,"56489146":37,"5652633":37,"56550723e":13,"5655078e":13,"56583008e":18,"56583288e":18,"566":[17,27],"56679427":37,"567":[29,55],"5678145":37,"5678393840789795":32,"5683971047401428":32,"5691276":3
7,"56952":40,"5696":54,"56987887":37,"56999975":39,"56999993":39,"56it":52,"57":[32,39,44,61],"570":[26,40],"57067982":37,"57161808013916":32,"5720632":37,"572309494018555":32,"57332":40,"57574":40,"57612565e":13,"5761261e":13,"57658655":37,"5770088e":11,"577021":37,"5772156715393066":32,"578007302479818e":8,"5786123":33,"57935402":37,"5798376202583313":21,"58":[32,43,44],"58136629":37,"581423":37,"58147273":37,"581592":52,"58203512":37,"58383257":37,"58384385e":18,"58384484e":18,"58633137e":18,"58633217e":18,"58648502":37,"587768077850342":32,"58815834":37,"5885818004608154":32,"58928575":37,"59":32,"5903606":37,"59100433e":18,"59100525e":18,"5919868e":38,"59245609e":13,"5924566e":13,"5928172":33,"594129":37,"59464":40,"59543345e":18,"59543371e":18,"596":40,"596368312835693":32,"59697":40,"5973891019821167":21,"59803428e":18,"59803450e":18,"59856561":37,"59973228":37,"599792":40,"5f":61,"6":[2,7,12,13,15,16,18,19,22,31,32,36,37,39,40,43,45,46,47,48,49,54,59,61],"60":[32,39,40,43],"6010535955429077":21,"60185594":37,"602":[23,37,55],"60362":40,"6037":7,"6042651690153994e":36,"60454853e":18,"60454881e":18,"60455":40,"60569829":37,"606118202209473":32,"60615351":37,"60754149e":18,"60754204e":18,"607797145843506":32,"6078912":17,"608":[34,55],"60934":40,"609598636627197":32,"61":32,"6113818":33,"61176586151123":32,"6121127":12,"6142144203186035":32,"61446":40,"6172231e":35,"61898054e":18,"61898088e":18,"61919039e":18,"61919057e":18,"62":32,"620":[44,55],"62114":40,"62244260e":18,"62244448e":18,"6230743":34,"62307472":34,"6230748":34,"62389817":37,"6241708e":35,"62558995":37,"62749566":37,"62771445":37,"62868121":37,"63":32,"63034505":37,"63079314":37,"63098":40,"63278937e":18,"63278986e":18,"63308799e":18,"63308841e":18,"633615970611572":32,"634060382843018":32,"6342":40,"63475229":37,"63479061e":18,"63479507e":18,"63528":40,"6360930e":35,"63719075":37,"637514114379883":32,"63865558":37,"63901585e":18,"63901656e":18,"63954211":37,"6399997":39,"64":[32,39,44,61],"64007308":37,"64070463180542":32,"642235025973605e":29,"64241":40,"64273089":37,"6428166":37,"6447601":33,"644931316375732":32,"64554977e":18,"64555024e":18,"6469999999889637e":3,"6470029":37,"64772188e":18,"64772224e":18,"648913860321045":32,"65":32,"65019824":37,"6509266":[45,46,48,49],"65198444":37,"6532766819000244":32,"654664063492684e":3,"656536102294922":32,"656836477399338e":15,"657":[30,55],"65719735e":18,"65719874e":18,"6585561e":35,"65990937e":18,"65991552e":18,"66":[26,32],"66051938":37,"66294828":37,"66362762e":18,"66363079e":18,"66380644e":18,"66454355":37,"66464688e":18,"6647287492528138":43,"6652":40,"66826437":37,"66956298e":18,"66956686e":18,"67":[32,43],"67055552":37,"672993183135986":32,"6732":61,"673200011253357":61,"6732631921768188":61,"67326319217681884765625":61,"6732632423543772848170429916717":61,"673324704170227":32,"67405555":37,"6741490364074707":32,"67426785":16,"67578773":37,"676":40,"67779351e":18,"67779541e":18,"67876234":34,"6787634":34,"67878895e":18,"67878950e":18,"67990695":37,"67it":3,"68":[32,39],"680880546569824":32,"6812":7,"68228708":16,"68234835":37,"68255837":37,"68291281":37,"68305664":37,"684":[33,55],"68405616":16,"68407345":37,"68412563e":47,"685":53,"6851817965507507":32,"6854097541217925e":15,"68567":40,"68619":40,"68658071":37,"6867028e":13,"68670320e":13,"68907313":37,"68918985e":18,"68918991e":18,"69":[26,32],"6900":10,"6911876797676086":21,"69140864":37,"69334602355957":32,"69480008":37,"69480186":37,"695":26,"69543815e":18,"69544780e":18,"6962714195251465":32,"6982718e":35,"698531
2700271606":21,"699055552482605":32,"6997":40,"69971891":[45,46,48,49],"7":[0,2,3,4,6,11,14,15,17,18,19,21,22,23,24,30,31,32,35,37,40,43,47,52,54,57,61],"70":[26,32],"70011145":37,"70107684e":18,"70108199e":18,"70128288":37,"70316642":37,"70529895":37,"70797":16,"70805502e":18,"70805526e":18,"7086":17,"70903280e":18,"70903301e":18,"7099996":39,"71":[26,32,39],"710877418518066":32,"71100584":37,"711697101593018":32,"711794912815094":32,"712230":40,"71364075":26,"713846e":43,"71414169e":47,"71450677e":18,"71450746e":18,"71459904e":18,"71459913e":18,"71520233e":18,"71520266e":18,"7158216e":35,"71597913":37,"718":61,"718491554260254":32,"71909321":37,"719284534454346":32,"71964918":37,"71993637084961":32,"72":[26,32],"7204409":35,"7204409241676331":35,"720792293548584":32,"72275914":37,"72324305":37,"72388908":37,"724166":31,"7241662":31,"72431414":37,"7256362":37,"72563625":37,"72581017":37,"72592116":37,"7264933586120605":32,"727372":37,"728328227996826":32,"7284168e":35,"72871654e":47,"73":[26,32],"73017167":37,"7301913e":38,"73135":40,"732256889343262":32,"73236":40,"73372186e":18,"73372230e":18,"7347797155380249":17,"735946178436279":32,"736152172088623":32,"73653572":37,"7366836857490853e":15,"73683":40,"73695993e":18,"73696034e":18,"737":65,"738556385040283":32,"73938966":[45,46,48,49],"73950393e":18,"73950435e":18,"74":[32,39],"740395545959473":32,"74122911e":18,"74122953e":18,"74171089":29,"741714":29,"74392898":37,"74433438":16,"7449":40,"74517066":37,"74524172":16,"74524173":16,"74534286e":47,"7455989000000045":43,"7465746402740479":32,"74676971e":18,"74676972e":18,"74727":44,"74826":40,"74891722e":18,"74891758e":18,"749999999973056e":3,"75":[3,13,14,19,26,30,32,43,52,61],"75025306":37,"75049044":37,"75119957e":18,"75120188e":18,"75211692":37,"7539635":37,"75628328e":18,"75628331e":18,"75673":40,"75685548e":[4,18],"7568555e":18,"75686325e":[4,18],"758":45,"75915071":37,"75it":3,"76":32,"76204203":37,"76242411":37,"7624385":41,"76272437":37,"76353654":37,"76558228":37,"76837135":37,"76858489":37,"76885583e":18,"76885669e":18,"77":32,"77001225e":47,"77003779":37,"77087912":37,"7715491999999813":43,"771734169003065e":36,"7733329e":13,"77334572e":13,"77340222":37,"77360088":37,"7736712694168091":32,"77550662":37,"78":[26,32,39],"78000003":39,"78040093e":18,"78041705e":18,"781":[6,27],"781283378601074":32,"78137328":37,"7825927734375":32,"783455848693848":32,"78696918e":18,"78697129e":18,"78993078":37,"78999385":37,"79":32,"79002563e":47,"7901466e":14,"79014703e":14,"79087008":[45,46,48,49],"79101156":37,"79148276e":47,"79334275":37,"7944385":33,"7959419429967056e":43,"795942e":43,"7960496":5,"79604962":5,"7969924":37,"797518253326416":32,"79894984e":18,"79895009e":18,"799262827148709e":37,"79967007":37,"79992473":37,"8":[0,2,3,4,11,12,13,14,15,17,18,19,22,26,32,37,38,39,40,43,54,57,60,61],"80":[32,39,43],"800000011920929":19,"800005":52,"80138328":37,"80192":37,"8022861":37,"8024351":37,"8040383":45,"80438695":37,"80464":40,"80545775":37,"8060846328735352":8,"8060857":8,"80620352":37,"80716295e":14,"80791629":16,"808129":16,"80853732":[45,46,48,49],"8086333e":14,"80966594":37,"81":[30,32],"8119":40,"81211172":37,"81289907":37,"813019752502441":32,"814":[4,9,18,33],"81423561":37,"815401077270508":32,"81570991e":18,"81571263e":18,"81655731":16,"81736946e":18,"81737794e":18,"81811365e":18,"81811404e":18,"81855214":37,"82":32,"8262804":33,"82637993":37,"82688169":37,"82714128e":18,"82714171e":18,"8277298e":35,"82866407":37,"82919535e":18,"82919696e":18,"83":[32,43],"8306964":12,"83187956":37
,"83298461":37,"83455725":45,"835":[36,55],"83618809":37,"8379581":16,"839":[16,27],"839216232299805":32,"83950":16,"84":32,"8403653e":13,"84053760e":13,"84071350097656":17,"840921740698832e":29,"841617584228516":32,"84174":40,"84221713":37,"8425":5,"84250874":37,"84302072":37,"84333333":45,"8436663":37,"8438695":16,"8455521":37,"846":21,"84715396":7,"847154":7,"8471540212631226":7,"848000":52,"84848":29,"84848608":29,"84944":40,"85":32,"850505828857422":37,"850506":37,"85116384":37,"85326266":37,"8534650802612305":32,"85557765":33,"85595278e":18,"85595427e":18,"856705665588379":32,"85678768":29,"8568":29,"85719428":37,"8577008843421936":11,"8577009":11,"8584946990013123":32,"86":[32,39,40],"860327":16,"86115":40,"86138575":37,"86331793":8,"8633179664611816":8,"8639233":37,"864675998687744":32,"86560047":37,"86642575":37,"86642713":37,"86706182":37,"86711784":37,"86741369":45,"86756462":37,"86775194e":18,"86775268e":18,"86953764":37,"8697345e":35,"87":32,"87010169e":18,"87010227e":18,"87032467":16,"87076527":37,"87091921":16,"8716":40,"87444773e":18,"87444919e":18,"87494798":37,"87600833":37,"87647831e":18,"87647893e":18,"87652483":37,"876655828dca47f4a6a31b60bdccb2e1":19,"8773022904715617e":36,"8773623704910278":7,"87736238":7,"8773624":7,"87747496":35,"8774749636650085":35,"87815407":37,"878242":5,"87824499":5,"8794":40,"88":[29,32],"8817291e":35,"88268626":33,"88506":40,"8867769":5,"88677702":5,"88685031":37,"88790704e":18,"88790754e":18,"888":27,"88899057e":47,"88933627":17,"89":[32,39],"89079656":37,"893578":37,"89360823":37,"8940267298169502":21,"8940268":21,"89643029":37,"89718302":16,"89721622":37,"89790391":16,"89828815":37,"89835633":37,"8999999761581421":61,"8bit":61,"9":[4,8,11,12,13,14,17,18,19,22,30,32,35,37,38,43,47,54,61],"90":[29,32,40],"90050155":33,"90068117":52,"9006811702978088":52,"9006812":37,"9016947":52,"9016947018779491":52,"90181942":37,"90217633":37,"9023882e":35,"9024":40,"90251":40,"90265503e":47,"902980e":30,"90334939956665":32,"9034561":37,"90474751e":18,"90474844e":18,"905197620391846":32,"90657":40,"90819174":8,"9081919193267822":8,"90871872":37,"90927099":37,"90929934e":18,"90930400e":18,"90931811":37,"90967607e":18,"90967608e":18,"9099":12,"91":32,"9101138e":13,"91011448e":13,"9103173":37,"910524368286133":32,"91088793":16,"91143692":37,"912094":40,"91237196e":18,"91237218e":18,"9129023551940918":32,"91291516e":18,"91291523e":18,"91538879":37,"91543245":5,"91543293":5,"91606802":37,"91632481e":18,"91632557e":18,"91688379":37,"91690629":37,"918":40,"91900236":[45,46,48,49],"92":[32,43,44],"92136662":37,"92139":40,"9214405e":14,"92159916":37,"92228310e":14,"92293569":37,"92503544":37,"92597852":37,"9260735511779785":32,"92610748":37,"9261332e":11,"92676767e":18,"92676806e":18,"92683189":37,"9272565841674805":32,"92839122":37,"92943813":37,"92957":40,"93":32,"93043986":37,"931":[3,27],"93109126e":18,"93112069e":18,"93117407":37,"93290167":37,"933238983154297":32,"93357383":37,"933687210083008":32,"935":[4,23,27],"93652687":45,"9365387e":14,"93713001e":14,"93839428":37,"94":[32,44,61],"940292835235596":32,"9403862953186035":32,"942":[52,55],"94218111038208":32,"94252732":45,"942908763885498":32,"94324481":37,"94518173":37,"9452659":37,"9470133649084751":21,"9470134":21,"94990512":37,"949999809265137":19,"95":32,"95051435e":18,"95051479e":18,"9516184329986572":2,"9518465399742126":8,"95222508":37,"95372":40,"956035614013672":37,"956036":37,"9575648":37,"95899960e":[4,18],"95900075e":[4,18],"9590008e":18,"95959623":5,"9595971":5,"96":32,"961411":31,"961591e":43
,"96833851":37,"968691349029541":32,"96885287":37,"96966239":37,"97":32,"970173358917236":32,"970326":37,"97046089e":18,"97046115e":18,"97232682":37,"9731414e":35,"97369206":37,"97564407":37,"97574326e":18,"97574356e":18,"9760862":37,"9761691e":13,"97651549e":13,"97685":37,"97701819e":18,"97701883e":18,"979":[22,27],"9795537":37,"98":[32,44],"9800849e":38,"98021229":37,"98053107":37,"980844497680664":32,"98148614e":[4,18],"98151529e":[4,18],"9815153e":18,"98175664":37,"982464":40,"982552528381348":32,"98294137":37,"98303620e":18,"98303699e":18,"9841911999999979":4,"9848723332927146":21,"98487234":21,"9857951":33,"98735":40,"98789866":37,"98813829":37,"98819451":16,"9882327318191528":32,"988456890307134e":6,"98849304":37,"98960096e":[4,18],"98961184e":[4,18],"9896118e":18,"9898257":37,"98982572555542":37,"99":[29,32,61],"99023912":37,"99062501e":[4,18],"9906250e":18,"99063495e":[4,18],"9915334e":38,"991699473881454e":11,"9916995e":11,"9917594790458679":32,"99197553":37,"9921055":37,"9926133":11,"9926133155822754":11,"99275191":37,"99422852":37,"99446269":37,"99545940e":14,"9954606e":14,"99604493":37,"99604549":37,"99645552":37,"99683897e":47,"99796537":37,"999":61,"9990000128746033":61,"99904370e":14,"9990449e":14,"99968581":34,"9996865":34,"9999734163284302":15,"99998536e":[4,18],"99998569e":[4,18],"9999857e":18,"9999914169311523":15,"9999918341636658":15,"99999285e":[4,18],"9999928e":18,"99999301e":[4,18],"999999747378752e":61,"9999999":3,"999999974752427e":61,"9_":59,"9f":61,"abstract":0,"boolean":61,"break":26,"byte":[0,15,40],"case":[0,5,7,16,21,31,32,34,36,39,40,44,45,46,47,49,53,54,57,58,59,60,61],"char":59,"class":[0,4,10,12,13,14,15,16,18,19,22,25,26,31,35,36,45,46,47,48,49,50,52,54,60,61,65],"const":61,"default":[0,6,8,11,12,16,25,26,32,33,41,44,45,48,49,52,56,58,59,60,61],"do":[0,5,7,12,16,17,19,20,24,25,32,34,36,37,40,42,43,44,47,50,51,52,59,60,61],"dupr\u00e9":56,"enum":61,"final":[0,4,7,10,11,12,17,18,26,29,35,40,53,61],"float":[0,6,7,13,15,16,17,19,22,28,29,31,32,39,40,43,44,45,46,50,53,55,57,61,63],"function":[0,3,10,12,13,14,16,21,22,25,30,33,36,37,38,39,41,44,45,46,47,50,51,56,58,59,60,61,65],"import":[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,53,54,56,57,58,60,65],"int":[17,19,22,32,61],"long":[24,25,42,44],"new":[3,10,13,14,16,21,24,25,28,31,32,33,46,51,52,53,55,56,57,61,64,65,66],"null":[36,52],"public":0,"return":[0,3,6,7,8,10,11,12,13,14,15,16,17,21,22,26,29,31,32,33,35,36,37,40,41,45,46,47,48,49,50,52,53,54,56,60,61],"short":[0,31],"static":61,"switch":[0,2,28,43,44,55,57,59,63],"throw":61,"true":[0,5,6,7,8,12,17,19,24,25,29,30,31,33,34,36,40,42,44,45,46,48,49,50,52,53,54,59,60,61],"try":[0,5,7,13,14,15,16,17,22,24,25,26,32,34,36,39,40,44,45,47,48,49,52,53,60,61,65],"var":[12,19,61],"while":[0,31,33,41,61],A:[0,6,8,9,11,13,14,16,19,28,29,32,33,35,36,40,41,43,44,52,55,57,60,61,66],And:[6,7,12,13,14,16,17,19,24,25,26,37,39,40,42,44,49,52,53,54,61],As:[16,60,61],At:[58,61],But:[0,22,33,36,39,45,46,53,54],By:[6,8,11,25,33,35,44,45,49,59,61],For:[0,61],If:[0,13,14,16,19,22,25,37,43,45,47,50,52,59,60,61,65],In:[0,19,33,36,44,45,46,47,49,50,53,54,58,61],Is:36,It:[0,7,12,13,14,15,16,17,18,19,20,21,22,25,26,29,31,32,33,36,37,38,39,40,41,43,44,45,46,47,49,50,52,53,54,56,58,59,60,61,64,65,66],Its:61,NO:[4,9,18,33],No:[4,53,61],Not:[17,61],One:[6,17,28,45,47,55,57,60,61,63],Or:[59,61],That:[0,7,11,12,13,14,16,23,25,30,31,32,33,35,36,37,39,44,49,50,58,59,60,61,63,66],The:[0,2,3,4,5,6,7,10,
11,12,13,14,15,16,17,18,19,21,22,24,25,26,29,30,32,33,34,35,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,56,57,58,59,60,61,65],Then:[12,13,14,17,23,24,25,33,37,53,58,61],There:[4,7,16,17,18,21,23,26,32,33,36,37,46,50,51,58,61],These:[0,59,61],To:[0,25,36,43,44,47,61,65],Will:[6,12],With:[20,26,61],_2:61,_3:12,_4:12,_:[6,26,33,36,40,41,43,44,52,57],__class__:[4,31,45,46,47,48,49,50,53],__init__:[10,12,13,14,45,46,47,48,49,50],__main__:[13,14,45,47],__max_supported_opset__:56,__name__:[50,52,57],__version__:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,35,43,57],_alter_model_for_debug:60,_as_tensor:61,_check_optimize_result:[4,9,18,33],_class:19,_contain:[0,26,59],_converter_pool:33,_debug:[4,18,37,60],_get_value_ndarrai:36,_label:41,_linearclassifi:41,_logist:[4,9,18,33],_normal:41,_pars:[0,35],_parser:0,_pca:[4,18],_probability_tensor:41,_probabl:41,_registr:[12,13,33],_sag:38,_sklearnlinearclassifi:41,_to_onnx:61,_topolog:[0,32,59],_tree:36,_twenty_newsgroup:26,_unus:61,_update_domain_vers:19,_y:12,a024deb0585b4b66881bcf336999d8a6:19,a1:6,a2:6,a_0:61,a_1:61,a_2:61,a_:61,a_k:61,a_n:61,a_scal:61,a_zero_point:61,ab:[6,16,29,36,37,43,45,46,47,48,49,50,52,53,61],abl:[50,60],about:[12,30,45,46,47,53,58,59,61],abov:[13,14,21,53,61],absolut:[29,61],absolute_error:61,accept:[7,17,61],accept_spars:61,access:[60,61],accord:[0,61],accross:[46,60],accumul:61,accuraci:26,aco:61,acosh:61,across:61,act:61,activ:61,actual:[12,15,17,26,45,57,61],ad:[0,10,28,32,33,35,43,45,46,47,48,50,52,55,57,59,61,64,65,66],ad_add1:37,ad_add:[23,37],ad_addcst:[23,33,37],ad_c01:37,ad_c0:37,ada_boost:46,adaboostclassifi:[33,61],adaboostregressor:61,adagrad:61,adam:61,adapt:61,add:[0,5,12,13,14,19,22,23,31,32,33,34,37,45,47,61,65],add_artist:12,add_ind:61,add_initi:[0,13,46],add_input:0,add_nod:[0,12,13,46],add_output:0,add_output_initi:38,add_to:[14,31,45,47,48,49,50,53],addit:[7,16,43,50,59,60,61],additivechi2sampl:61,address:[21,59],adjust:[12,13,61],administr:[25,44,52],advanc:57,affin:61,affinitypropag:61,after:[7,17,43,58,60,61],ag:[7,17],again:[7,13,14,16,40],against:[43,61],age_cast:17,agglomerativeclust:61,aggreg:61,aggregate_funct:61,agnost:61,agre:[26,36],ahm:56,ai:[0,3,12,17,19,21,22,23,24,25,32,42,43,44,50,52,53,61],aic:61,aidan:56,aim:22,ak:43,aka:61,albu:40,alexnet:61,algebra:[3,10,14,21,22,23,31,45,47,48,49,50,53,61],algorithm:[50,60,61],alia:[0,12,13,14,19,31,49,53,60],align:61,align_corn:61,all:[0,1,5,12,15,18,28,31,33,39,50,52,53,54,56,59,60,61],all_except_channel_index:61,all_model:60,all_opt:33,alloc:[50,61],allow:[0,12,36,38,50,61],allowzero:[32,61],almost:[16,26,29,59,61],alon:61,along:[36,52,61],alpha:[16,40,45,46,47,48,49,50,61],alpha_1:61,alpha_2:61,alpha_init:61,alpha_per_target:61,alphabet:61,alreadi:[0,40,47,60],also:[0,2,4,9,18,19,22,32,33,36,38,39,50,52,53,58,59,60,61],alt:26,alter:[37,59,60],altern:[1,4,9,18,27,33],alwai:[11,36,58,59,60,61],ambigu:[50,52,59,61],ambiti:51,american:40,ami:61,among:[15,56],amount:61,amuel:[7,17],an:[0,2,4,5,7,8,10,12,13,14,15,16,18,19,21,22,23,25,28,29,30,31,32,33,34,36,37,38,43,45,46,47,48,50,54,55,56,57,58,59,60,61,63,65],analog:61,analys:59,analyz:61,angl:40,ani:[0,12,13,14,15,19,31,33,40,45,46,50,59,60,61],annot:40,annotationbbox:12,anomali:61,anoth:[0,6,13,14,25,36,37,38,40,45,47,58,61,65],answer:[40,53],anti:26,anyhow:31,anymor:[10,16,50],anyon:26,anyth:50,apach:56,api:[2,13,14,24,25,40,42,44,45,47,56,60],appear:[31,44,61,67],append:[3,7,12,13,14,17,21,30,31,36,40,43,48,49,52,53,61],appli:[7,12,17,21,26,33,50,52,59,61],applic:[58,
61],apply_cast:13,apply_great:13,apply_ident:13,apply_sub:46,appropri:[28,45,46,55,57,63],approxim:[12,16,21,61],ar:[0,4,7,11,12,13,14,16,17,22,26,29,30,32,33,35,36,37,39,40,45,46,47,48,49,50,52,56,58,59,60,61,62,66],ar_argmin:37,ar_arrayfeatureextractor1:21,ar_arrayfeatureextractor:21,ar_arrayfeatureextractorcst1:21,ar_arrayfeatureextractorcst:21,ar_z02:21,ar_z0:21,arang:[10,22,24,25,42,44,52,54],arbitrari:61,arc3:40,arccosin:61,arcsin:61,arctang:61,ardregress:61,area:36,area_mismatch_rul:36,arg0:39,arg1:39,arg2:39,arg:61,argmax:61,argmin:[32,37,61],argument:[29,39,58,61],arithmet:61,arpack:60,arrai:[0,2,3,7,12,14,15,16,18,21,22,23,26,28,31,33,35,37,39,40,41,50,52,53,55,57,60,61,63,65],arrang:61,arrayfeatureextractor:[21,61],arriv:61,arrowprop:40,arrowstyl:40,artif:52,arxiv:61,asarrai:40,ascend:61,asin:61,asinh:61,ask:17,assert:[4,18,45],assert_almost_equ:[7,45,47,50,60],assign:[0,61],associ:[0,13,14,19,24,25,33,42,44,45,47,54,61,65],assum:[0,6,16,21,26,36,43,45,49,58,61,65,66],assume_finit:30,assumpt:[6,36],astyp:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,21,22,24,25,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,52,53,54,56,58],asymmetr:61,atan:61,atanh:61,atheism:26,att:50,attach:[0,66],attent:[45,47],attr:[0,12],attribut:[0,3,12,17,18,19,22,32,45,46,47,48,49,50,60,61],attribute_axi:61,attribute_sort:61,attributeerror:[12,60],author:61,auto:61,auto_exampl:27,auto_examples_jupyt:1,auto_examples_python:1,auto_pad:61,auto_tutori:55,auto_tutorial_jupyt:28,auto_tutorial_python:28,automat:[0,50,54,56],avail:[0,5,7,17,22,30,32,34,35,56,60,62],averag:[3,12,30,35,45,46,47,48,49,61],averagepool:61,avg:[26,61],avlog0_add:32,avlog0_c01:32,avlog0_c0:32,avlog0_mul:32,avlog1_add:32,avlog1_c01:32,avlog1_c0:32,avlog1_mul:32,avlog2_add:32,avlog2_c01:32,avlog2_c0:32,avlog2_mul:32,avoid:[45,46,47,48,49,50,52,60,61],avpl0_add:32,avpl0_c0:32,avpl1_add:32,avpl1_c0:32,avpl2_add:32,avpl2_c0:32,awai:61,awar:[61,66],ax:[5,6,7,10,12,13,14,17,22,24,25,26,29,32,33,34,36,37,39,40,42,43,44,45,47,48,49,50,54,61],axessubplot:[12,40],axi:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,32,39,45,46,48,49,50,52,53,54,61],axis_1:61,axis_i:61,axis_j:61,axis_m:61,b:[17,19,36,39,53,59,61],b_in:61,b_out:61,b_scale:61,b_scan_out_1:61,b_scan_out_k:61,b_zero_point:61,back:[35,38,61],backend:[1,22,27,56,59,60],backward:61,bactch:61,bad:15,baggingclassifi:[33,61],baggingregressor:61,ball:40,base:[0,6,10,12,13,14,22,26,36,45,46,47,48,49,50,58,59,61],base_estim:61,basedecisiontre:61,baseensembl:61,baseestim:[10,12,13,14,26,45,46,47,48,49,50],baselin:43,basic:[6,9,12,16,19,22,46,61],basic_linear_algebra_subprogram:61,basicconfig:[0,19],batch:[0,30,45,46,58,61],batch_axi:61,batch_dim:61,batch_id:61,batch_index:61,batch_indic:61,batch_siz:[30,61],batchnorm:61,bath:40,bayesiangaussianmixtur:[33,61],bayesianridg:[33,61],bbox:40,beacon:40,becam:32,becaus:[4,7,13,14,16,17,18,26,32,35,36,44,52,59,61],becom:[39,43,46,58,61,64],been:[33,59,61],befor:[7,10,12,17,23,29,36,40,50,60,61,62],beforehand:[7,17],begin:[19,61],behav:[13,14,61],behavior:[25,44,52,61],behaviour:[5,8,11,26,34,53,59,61],being:[0,13,14,19,25,26,43,45,47,61,65],belong:[30,54],below:[0,4,7,17,18,32,36,45,47,56,59,61],bench:40,benchmark:[1,27,28,55,57,63],bernoulli:61,bernoullinb:[33,61],bernoullirbm:61,best:[26,61],beta:[48,49,61],beta_1:61,beta_2:61,beta_adjust:61,better:[16,22,33,35,36,37,43,52,61],between:[0,2,3,12,36,60,61],beyond:61,bfloat16:61,bfloat:61,bia:61,bicub:61,bidirect:61,big:[12,22,30,50],bigger:16,biggest:[16,29],bigotri:26,bilinear:61,bin:61,bina
r:61,binari:[21,54,61],bind:61,binder:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54],birch:61,bit:61,bitshift:61,bitwis:61,black:[0,5,28,55,57,61,63],black_op:[0,5,34,61],blacklist:[0,5,34,61],blob:[12,61],block:61,blocksiz:61,boat:[7,17],bod:26,bodi:[7,17,26,61],body_bow:26,body_stat:26,bold:12,bool:61,bootstrap:[53,61],bootstrap_featur:61,border:61,bost:16,boston:16,both:[0,17,29,36,45,48,49,59,61],bottom:40,bought:21,bound:[36,61],boundari:61,box:61,box_index:61,boxstyl:40,br:61,branch:61,branch_eq:61,branch_gt:61,branch_leq:[19,32,61],branch_lt:61,branch_neq:61,break_ti:[32,61],bridegroom:40,briefli:58,bring:26,broadcast:61,broadcast_to:61,browser:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54],bst:44,bst_1:61,bst_n:61,build:[0,6,26,36,40,45,60],built:[36,37],bytesio:[40,45,47,50],c09732614109405b8d4e48e8307c0be7:19,c1:36,c2:36,c:[2,7,12,13,14,17,25,30,36,38,39,44,51,52,53,60,61],ca_output0:33,cabin:[7,17],cacatua:40,cach:37,cache_s:61,cacul:61,calcul:[0,12,13,14,24,25,42,43,44,45,46,47,53,61,65],calculate_linear_classifier_output_shap:[0,24,25,42,44,52,60],calculate_linear_regressor_output_shap:[43,44],calibr:61,calibratedclassifiercv:[33,61],call:[0,10,12,14,16,19,32,37,47,50,52,58,59,60,61],call_convert:19,callback:61,can:[0,2,7,10,12,13,14,15,17,18,22,24,25,26,29,31,32,33,35,36,37,38,39,40,42,43,44,45,46,47,48,50,51,54,56,58,59,60,61,62,63,65,66],cannot:[0,3,4,7,15,17,18,32,44,60,61],canon:61,capi:[15,24,39,40],captur:37,car:40,carri:61,case_change_act:61,cast1:[6,17],cast2:[6,17],cast3:17,cast4:17,cast64:[6,36],cast:[0,6,12,17,19,31,32,36,52,60,61],cast_to:61,castlik:61,castmap:61,castregressor:61,casttransform:[6,52,61],casual:26,cat1:39,cat2:39,cat:[7,17,31,39,60],cat_col:39,catch_warn:52,categor:36,categori:[26,31,61],categorical_encod:[28,55],categorical_featur:[7,17,60,61],categorical_transform:[7,17,39,60],categoricalencoderordinalencod:31,categoricalencoderwoeencod:31,categoricalnb:[33,61],categories_:31,category_encod:31,categorymapp:61,cats_int64:61,cats_str:17,catwo:31,caus:61,cc:[25,44,52,61],cca:61,cco:26,ccp_alpha:61,cd_cdist:3,cdist:[1,27,33,59],ceil:61,ceil_mod:61,cell:61,celu:61,center:[12,61],center_point_box:61,certain:61,chain:[4,18],chang:[0,3,4,5,6,7,8,16,17,18,25,28,32,34,36,40,44,46,50,52,55,57,59,60,61,65],change_batch_s:40,channel:61,channel_first_input:0,char_wb:59,charact:[59,61],check:[0,4,7,10,12,13,14,16,22,31,37,40,45,46,47,54,56,61],check_input_and_output_numb:[0,31],check_input_and_output_typ:0,check_invers:61,check_model:22,checker:22,chin:56,choic:[33,35,56,61],choos:[0,28,50,55,56,57,61,63],chosen:61,chunk:61,church:40,cl:[32,52],class_:61,class_id:[19,61],class_index:61,class_nam:40,class_nodeid:19,class_prior:61,class_siz:61,class_treeid:19,class_weight:[19,60,61],classes_:[13,14],classes_str:61,classic:61,classif:[40,61],classifi:[0,1,7,8,11,12,17,18,26,27,28,29,33,45,49,55,57,60,61,62,63],classification_report:26,classifier__zipmap:59,classifierchain:61,classifiermixin:[13,14],classlabels_:61,classlabels_int64:[17,19,61],classlabels_int:17,classlabels_str:61,clean:61,clf:[7,17],click:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54],climg:40,clip:[53,61],clipped_:53,clone:[12,13,14],close:[12,54],closer:21,clr:[8,9,11,15,19,33,35,41,56,58],clr__raw_sco
r:33,clr__zipmap:33,clrrf:33,cls_type:[49,53],cluster:[10,37,61],cm:12,cmap:12,cnn:61,co:61,cockatoo:40,code:[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,58,60,61,63],coef0:61,coef:[46,50],coef_:[38,45,46,47,48,49,50],coef_left:50,coef_nam:46,coeffici:[16,17,36,45,61],coerc:61,coercion:61,col:[21,31,61],col_index:21,col_indic:21,collect:[0,60,61],collect_intermediate_step:[4,18,37,60],collector:40,color:[5,6,7,10,12,13,14,17,22,24,25,26,54],column:[0,3,7,11,17,24,25,26,33,39,40,42,44,45,47,52,53,54,58,60,61],column_map:31,columntransform:[0,1,17,26,27,39,52,60,61],com:[3,7,12,13,14,17,25,26,40,45,47,61,65],combin:[26,40,61,65],come:[26,50,60,61],comma:61,comment:[4,7,17,18,50],common:[0,3,7,8,9,10,11,12,13,14,15,16,17,19,21,24,25,26,31,32,33,35,36,37,40,41,42,43,44,45,46,48,49,50,52,53,56,58,59,60,61],commonli:58,compar:[1,12,16,27,29,30,33,36,37,48,52,60],compare_object:[4,60],comparison:[36,37,61],compat:61,compil:[2,6],compiled_run:[29,33,36,49],complementnb:[33,61],complet:60,complex128:61,complex64:61,complex:[21,50,57,61],compon:[0,4,18,40,45,60,61],components_:[21,40,47],compos:[7,17,26,39,52,58,60,61],composit:61,compress:61,compromis:36,compur:59,comput:[0,2,3,6,7,16,21,22,29,30,31,33,36,37,43,45,46,47,48,49,50,53,56,58,59,60,61,63],compute_label:61,compute_scor:61,concat1:17,concat2:17,concat:[17,61],concat_result:[17,61],concaten:61,concatfromsequ:61,concept:61,cond:61,condit:61,condition_var:61,conduct:61,config_context:30,configur:52,confirm:[8,11,16],connect:[12,61],connectionstyl:40,consecut:[60,61],consid:[0,5,24,25,26,32,34,42,44,52,61],consist:[40,54,60,61],constant:[7,16,17,22,38,45,46,53,61],constant_valu:61,constantofshap:61,constrain:61,constraint:61,construct:[25,26,44,61],constructor:50,consum:61,contain:[10,12,13,14,18,19,20,25,31,32,33,36,43,45,46,47,48,49,50,51,53,54,58,59,60,61,65],contamin:[53,61],content:[0,23,40,61],context:[3,30,48,61],contigu:61,continu:[7,12,17,31,32,33,36,51,52,60,61],contrast:61,contribut:[13,14,25,45,47,65],control:61,conv:[0,19,61],convent:[33,61],converg:[4,9,18,30,33,38],convergencewarn:[4,9,18,33,38],convers:[0,6,7,11,16,17,25,28,29,31,36,44,50,52,53,55,57,59,61,63],convert:[1,15,18,21,23,26,27,28,30,32,33,36,37,39,40,49,51,52,55,57,63,66,67],convert_dataframe_schema:[7,17],convert_fct:0,convert_lightgbm:[24,42,43,52],convert_oper:19,convert_sklearn:[0,4,7,8,9,10,11,12,16,17,18,19,24,25,26,42,44,56,59,60],convert_sklearn_random_forest_classifi:[0,60],convert_sklearn_text_vector:59,convert_topolog:[0,19],convert_xgboost:[25,44,52],convert_xgboost_boost:44,convet:62,convinteg:61,convolut:61,convtranspos:61,coordin:61,coordinate_transformation_mod:61,copi:[6,11,24,25,33,35,36,37,38,42,44,46,52,61],copy_x:61,copy_x_train:61,core:58,corner:61,correct:0,correctli:[17,61],correl:[45,46,47,48,49,50],correspond:[0,13,14,25,45,47,60,61,65],corrspond:61,cosh:61,cosin:61,costin:56,could:[0,7,17,36,38,40,47,54,60,61,66],count:[52,61],count_include_pad:61,counter:61,counterpart:0,countvector:[33,52,61],coupl:[16,34,61],cours:61,cov:[45,47,50],covari:[45,47,61],covariance_estim:61,covariance_prior:61,covariance_typ:61,cover:[0,13,14,25,45,47,65],coveri:[45,46,47,48,49],cpu:[2,58],crd:61,creat:[0,2,3,8,13,14,16,19,21,22,36,49,50,52,58,59,60,61,65,66],credit:56,crest:40,criterion:61,crook:56,cross:61,cross_decomposit:61,crt:40,cs:61,cst:52,csv:[7,17],ct:61,ctree:36,cubic:61,cubic_coeff_a:61,cudnn:61,cumsum:61,cumul:61,current:[26,36,59,60,61,66],
current_mean:61,current_var:61,custom:[0,1,17,19,22,25,26,27,36,49,50,52,57,59,60,61,66],custom_conversion_funct:0,custom_metadata_map:20,custom_op:3,custom_pars:[0,13,14,31,49,53,60],custom_shape_calcul:0,customoptransform:10,custompredictabletsn:12,customvalidatorclassifi:[13,14],cv:61,cycl:64,cyclic:61,d1:61,d2:[40,61],d:[4,6,9,18,25,26,29,31,32,33,35,36,38,39,40,43,44,45,46,47,48,49,50,52,61],d_0:61,d_1:61,d_2:61,d_:61,d_axi:61,d_k:61,d_n:61,data1:53,data:[0,2,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,21,24,25,26,29,30,31,33,34,35,37,38,39,40,41,42,44,45,46,47,48,49,50,53,56,58,60,61],data_0:61,data_batch:61,data_channel:61,data_dens:52,data_featur:61,data_in:60,data_input:18,data_output:18,data_shap:61,data_spars:52,data_typ:[0,3,4,7,8,9,10,11,12,13,14,16,17,18,19,21,23,24,25,26,31,32,35,37,41,42,44,45,46,48,49,50,52,53,56,58,60],datafram:[3,4,7,17,18,28,30,43,48,52,53,54,55,57,58,63],dataset:[2,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,20,24,25,26,29,30,31,32,33,34,35,36,37,38,41,42,44,45,46,47,48,49,50,56,58],datatyp:61,dbscan:61,dcr:61,de:61,deactiv:[11,59],deal:[36,60],debug:[0,19,37],dec2:[45,47,50],dec:[45,46,47,48,49,50],dec_add:32,dec_addcst:32,dec_c0:32,dec_div:32,dec_divcst:32,dec_neg1:32,dec_neg:32,dec_pow:32,dec_powcst:32,dec_sum0:32,dec_sum:32,dec_y01:32,dec_y0:32,dec_z0:32,decai:61,decay_factor:61,decid:61,decis:[6,33,36],decision_funct:18,decision_function_shap:61,decision_leaf:33,decision_scores_:53,decisiontreeclassifi:[19,33,61],decisiontreeregressor:[6,33,36,61],declar:[0,12,13,48,49,50,61],declare_local_oper:[12,13,14,31,49,53],declare_local_vari:[12,13,14,31,49,53],decod:61,decode_error:61,decomposit:[1,4,18,26,27,40,47,60,61],decorrel:[45,46,47,48,49],decorrelate_transformer_convert:[45,46,47,48,49],decorrelate_transformer_pars:49,decorrelate_transformer_shape_calcul:[45,46,47,48,49],decorrelatetransform:[45,46,47,48,49],decreas:61,dedic:[5,12,22,59],deep:[12,36,37,40,61],def:[0,3,6,7,10,12,13,14,17,21,22,26,29,31,32,33,36,40,41,43,45,46,47,48,49,50,52,53],default_:61,default_batch_s:0,default_float:61,default_int64:61,default_str:61,defin:[0,13,14,16,22,26,32,36,45,46,47,49,50,53,54,58,60,61,66],definit:[7,12,17,22,61],degre:61,degrees_of_freedom_prior:61,del:18,deleg:0,delet:26,delimit:0,delta:[36,61],demonstr:[23,31,36,46],denot:[0,36,61],dens:61,depend:[2,5,21,34,50,52,58,59,60,61,64],deploi:[9,20,28,30,32,55,57,63],deploy:36,deprec:[25,44,50,61],depreci:61,depth0_add1:32,depth0_add:32,depth0_c01:32,depth0_c0:32,depth1_add1:32,depth1_add:32,depth1_c01:32,depth1_c0:32,depth2_add1:32,depth2_add:32,depth2_c01:32,depth2_c0:32,depth:61,depthtospac:61,dequant:61,dequantizelinear:61,deriv:61,desc:50,descent:[21,61],describ:[7,11,17,21,22,32,35,58,61],descript:[20,61],design:[28,33,51,55,57,66],desir:61,dest:[7,17],det:61,detail:[7,17,19,39,58,61],detect:[33,61],detect_neg:61,detect_posit:61,detector:53,detector_:53,determin:[0,36,61],determinist:61,dev:3,devblog:61,develop:[36,58,64],devianc:61,deviat:[3,12,30,48,61],devic:2,df1:53,df:[3,4,7,17,18,30,43,52,54],df_skl:30,df_train:52,dh:61,diag:[45,46,48,49,50],diagon:61,dicrep:53,dict:[3,11,12,26,33,35,39,40,43,48,52,60],dict_input:[29,33,36,49],dictionari:[0,7,8,11,17,33,35,39,59,61],dictionarylearn:61,dictionarytyp:19,dictvector:[26,61],did:[33,38],diff:[16,29,36,37,45,46,47,48,49,50,52],diff_label:53,diff_proba:53,differ:[0,1,5,6,7,16,17,23,27,29,33,34,35,36,37,39,40,44,45,46,47,48,52,59,60,61,66],differenti:61,difficult:[16,22,26,36,50,64,66],dig:[32,39],digit:[4,12,18],dilat:61,dim:[3,17,19,21,22,23,32,61],dim_param:61
,dim_valu:[3,17,19,21,22,23,32,61],dimens:[12,13,14,15,37,40,53,58,61],dimension:61,diment:61,dinput:22,direct:61,directli:[2,39,61],dirichlet_process:61,disabl:[16,19,61],disagre:36,disambigu:61,disc:43,disc_split:43,discard:61,discontinu:[36,61],discord:36,discrep:[1,26,27,44,52,67],discriminant_analysi:61,discuss:58,dish:40,disp:43,disp_split:43,displai:[0,5,23,33,34,39,61,65],dist:12,distanc:[3,59,61],distinguish:0,distribut:61,distutil:43,div:[6,32,33,36,61],div_by_numb:[30,48],div_cast:[6,33,36],divid:61,dividend:61,divis:[6,61],divisor:61,dk:61,dmatrix:44,dn:61,doc:[12,61],doc_str:[0,17,19,20,32],docstr:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54,60],document:[4,9,12,16,18,22,26,29,33,60],doe:[0,4,6,7,12,13,14,15,17,18,21,22,26,32,34,36,39,41,44,46,50,51,52,58,59,60,61,65],doesn:[26,61],dom:32,domain:[0,3,17,19,20,21,22,23,26,32,50,56,61,66],don:[12,50],done:[7,16,17,30,33,36,38,59,60,61],dori:61,dot:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,39,54,60],dotproduct:16,doubl:[0,1,6,7,15,17,27,29,36,43,45,46,47,50,53,61],doubletensortyp:[16,39],down:61,download:[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54],download_fil:40,downsampl:61,draw:[1,27,38,61],drawn:61,drop:[7,17,39,52,61],dropout:61,dt:[6,36],dtrain:44,dtype:[0,2,3,6,7,12,14,15,16,17,18,21,22,23,26,31,33,35,36,37,38,39,40,41,45,48,49,50,53,60,61],du:56,dual:61,duchi11a:61,due:[15,17,36,37,40,60],dumdf1:53,dump:[16,18,45,47,50,60],duplic:61,dure:61,dw:61,dx:[36,61],dy:[36,61],dynam:[22,37,61],dynamicquantizelinear:61,dz:61,e:[0,5,7,13,14,15,16,17,24,25,32,34,36,39,40,44,45,47,52,53,60,61,65],e_:12,each:[0,4,7,11,12,18,30,35,60,61,63],early_stop:61,easi:[10,17,40,57,58,60,61],easier:[2,33,46,61],easili:[18,37,61,66],east_coker_elm:40,edg:61,effect:[59,61],effici:[7,17,33,49,50,61],eg:61,egret:40,egretta:40,eig:[45,46,48,49],eigen:50,eigen_solv:61,eigenvalu:50,eigenvector:50,eigv:50,eigval:50,einstein:61,einsum:61,either:[15,38,61],el:40,elasticnet:[38,61],elasticnetcv:61,elem_typ:[3,17,19,21,22,23,32],element:[0,60,61],element_typ:19,elementwis:61,elif:[7,12,17,52],ellipsi:61,ellipticenvelop:61,els:[4,7,12,13,14,17,31,36,40,43,48,52,61],else_branch:61,elu:61,embark:[7,17],embarkedout:17,embed:12,emit:61,empiricalcovari:61,empti:[0,16,26,50,59,61],en:61,enabl:[0,19,33,56,59,61],enc:31,encapsul:0,enclos:61,encod:[25,31,44,61],encourag:61,end1:61,end:[13,14,19,30,33,49,53,59,61],end_i:61,end_x:61,endn:61,engin:56,enough:[0,19,22,52,59],ensembl:[9,29,30,32,33,39,56,61],ensur:[7,17,61],entir:61,entri:[0,61],entropi:61,enumer:[0,4,11,18,26,35,40],enumerate_model_node_output:[0,17],enumerate_pipeline_model:60,ep:61,epgk:[52,53],epkg:[36,38,40,44],epsilon:61,epsilon_insensit:61,eq2_0_c0:32,eq2_0_cast:32,eq2_0_equ:32,eq2_0_output0:32,eq2_1_c0:32,eq2_1_cast:32,eq2_1_equ:32,eq2_1_output0:32,eq2_2_c0:32,eq2_2_cast:32,eq2_2_equ:32,eq2_2_output0:32,eqp2p_m1_0_add:32,eqp2p_m1_0_addcst:32,eqp2p_m1_0_c0:32,eqp2p_m1_0_max02:32,eqp2p_m1_0_max0:32,eqp2p_m1_0_max1:32,eqp2p_m1_0_max:32,eqp2p_m1_0_maxcst1:32,eqp2p_m1_0_maxcst:32,eqp2p_m1_1_add:32,eqp2p_m1_1_c0:32,eqp2p_m1_1_max02:32,eqp2p_m1_1_max0:32,eqp2p_m1_1_max1:32,eqp2p_m1_1_max:32,eqp2p_m1_2_add:32,eqp2p_m1_2_c0:32,eqp2p_m1_2_max02:32,eqp2p_m1_2_max0:32,eqp2p_m1_2_max1:32,eqp2p_m1_2_max:32,eqp2ps0_c0:32,eqp2ps0_mul:32,eqp2ps1_c0:32,eqp2ps1_mul:32,eqp2ps2_c0:32,eqp2ps2_mul:32,eqp_log0_add:32,eqp_log0_addcst:32,eqp_log0_c01:32,eqp_log0_c0:32,eqp_log0_log:32,eqp_log0_mul:32,eqp_log0_output0:32,eqp_
log1_add:32,eqp_log1_c01:32,eqp_log1_c0:32,eqp_log1_log:32,eqp_log1_mul:32,eqp_log1_output0:32,eqp_log2_add:32,eqp_log2_c01:32,eqp_log2_c0:32,eqp_log2_log:32,eqp_log2_mul:32,eqp_log2_output0:32,eqp_ns0_c01:32,eqp_ns0_c0:32,eqp_ns0_div:32,eqp_ns0_max0:32,eqp_ns0_max:32,eqp_ns0_mul:32,eqp_ns0_mulcst:32,eqp_ns1_c01:32,eqp_ns1_c0:32,eqp_ns1_div:32,eqp_ns1_max0:32,eqp_ns1_max:32,eqp_ns1_mul:32,eqp_ns2_c01:32,eqp_ns2_c0:32,eqp_ns2_div:32,eqp_ns2_max0:32,eqp_ns2_max:32,eqp_ns2_mul:32,equal:[32,56,61],equat:61,equival:[0,6,21,35,45,52,60,61],eras:61,ereg:[29,30],erf:61,error:[0,1,5,16,18,19,25,27,29,32,34,36,44,45,47,52,60,61,65],error_scor:61,eseanu:56,essenti:61,estim:[12,13,14,21,29,30,35,50,61],estimator_:[12,13,14],eta0:61,etc:61,ether:61,euclidean:3,eval_metr:[25,44,52],evalu:[25,44,52,61],even:[6,11,13,14,16,25,26,35,36,43,45,47,60,61,65],everi:[0,4,6,7,12,13,14,16,17,18,22,25,31,32,33,34,35,36,38,40,43,44,45,46,47,50,53,54,56,59,60,61,63,64,65,66],everyth:50,everywher:[36,61],evid:54,ex:61,exact:[26,59,60,61],exactli:[26,61],examin:61,exampl:[0,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,23,24,25,26,29,30,32,33,34,35,36,37,38,39,40,42,43,45,46,47,48,49,51,52,53,56,57,58,59,61,62,64,65,67],example1:23,example2:15,exce:61,exceed:61,except:[0,4,5,7,13,14,15,16,17,24,25,26,32,34,36,39,40,44,45,47,52,53,60,61,65],exclud:61,exclude_outsid:61,exclus:61,execut:[3,27,55,59,61],exercis:50,exist:[0,12,13,21,22,25,40,44,47,50,56,59,60,61,63,66],existing_nam:[0,41],exit:[36,40],exp1:6,exp2:6,exp:[12,21,45,46,47,48,49,50,61],expand:61,expans:61,expect:[0,13,14,15,25,40,43,44,45,46,47,49,50,53,60,61],expected_attribut:50,expected_input:50,expected_label:53,expected_output:50,expected_proba:53,explain:[7,17,30,36,43],explicit:[0,6,19,52,61],explicitli:[25,44,52,61],expon:61,exponenti:61,expos:[0,35],express:[26,43,59],extend:[2,12,16,25,32,36,39,40,44,57,60,61],extra:[0,16,61],extract:[26,32,60,61],extrapolation_valu:61,extratreeclassifi:[33,61],extratreeregressor:[33,61],extratreesclassifi:[33,61],extratreesregressor:[33,61],extrem:[54,61],extrema:61,eyelik:61,f1:26,f32:36,f:[2,7,9,12,15,17,18,22,23,24,25,26,36,38,40,42,43,44,52,56,58,61],f_classif:61,facilit:36,fact:[16,45],factor:[21,36,61],factoranalysi:61,fail:[0,4,5,9,13,14,15,16,17,18,19,24,25,33,34,36,37,44,52,53,60,61],fals:[0,7,11,12,14,19,24,25,29,31,33,34,37,38,39,41,42,44,45,47,48,49,50,52,54,59,60,61],falsefals:19,famou:58,far:61,fare:[7,17],fare_cast:17,fast:[0,28,55,57,66],faster:[5,11,26,30,34,35],fastica:61,fc:40,fct_parser:0,featur:[0,7,12,17,26,31,36,45,46,47,48,49,50,54,58,61],feature_extract:[26,52,61],feature_names2:53,feature_rang:61,feature_select:61,featureagglomer:61,featurehash:61,featureunion:61,featurevector:61,fed:[19,61],feed:[15,61],femal:17,fenc:40,fetch:[61,65],fetch_20newsgroup:26,fid:23,field:[26,61],fig:[5,6,7,10,12,13,14,17,22,24,25,26,32,36,40,54],figsiz:[5,6,7,10,12,13,14,17,22,24,25,26,36,40,54],figur:[33,61],file:[0,4,9,16,18,25,27,33,38,40,44,55,61],filenam:[0,23],fill:[0,5,6,7,10,12,13,14,17,22,24,25,26,54,61],fill_valu:[7,17,61],fillcolor:[5,6,7,10,12,13,14,17,22,24,25,26,54],fillna:[7,17],filter:61,filter_in_channel:61,filter_out_channel:61,filter_spati:61,final_estim:61,final_state_and_scan_output:61,final_typ:[0,41,61],find:[0,13,14,17,18,25,26,45,47,50,60,61,65],fine:[10,61],finlei:56,first:[0,2,5,10,11,12,14,15,17,21,22,26,29,33,34,35,36,37,40,45,46,47,50,53,54,58,59,60,61],fit:[2,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,52,5
3,54,56,58,60,61],fit_intercept:61,fit_inverse_transform:61,fit_path:61,fit_prior:61,fit_transform:[12,21,26],five:61,fix:[6,15,16,61],flag:61,flagpol:40,flagstaff:40,flatten:[0,40,61],flatten_transform:61,flaw:33,flip:61,float16:61,float32:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,21,22,23,24,25,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,52,53,54,56,58,60,61],float64:[6,7,15,16,17,21,31,36,45,46,47,61],float_data:[21,23,32],float_input:[0,8,9,11,19,35,44,53,56,58],floattensortyp:[0,3,4,7,8,9,10,11,12,13,14,16,17,18,19,21,24,25,31,35,37,39,41,42,44,52,53,56,58,60],flog:37,floor:61,flow:61,fmin_l_bfgs_b:61,fmod:61,focus:3,fold:61,follow:[0,3,4,6,9,12,15,16,18,21,25,26,32,33,36,38,39,40,43,44,45,47,49,52,56,57,58,59,60,61,62,64,65],fontdict:12,fontsiz:40,footstal:40,forc:[26,61],forest:[9,19,43,60],forget:61,form:[26,61],formal:61,format:[0,2,11,12,13,14,15,20,22,29,35,40,56,61],formul:61,formula:61,forward:61,found:[0,13,14,19,22,25,44,45,47,56,60,61,65],fountain:40,four:[60,61],fourth:[10,35,61],foward:61,fp32:61,frac:[45,47],framework:[0,2,40,61],frequent:36,friedman_ms:61,from:[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,28,29,30,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,64,65],from_sklearn:0,from_valu:61,frontend:61,ft:61,full:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,60,61],full_nam:[12,13,14,46],fulli:[26,29,31,60],func:61,functiontransform:61,furnac:40,further:61,fuse:61,futur:[25,44],futurewarn:52,fvar_0:29,fvar_1:29,fvar_2:29,g:[0,36,61],g_regular:61,galerita:40,galleri:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,56,65],gamma:61,gammaregressor:61,gate:61,gather:[32,61],gatherel:61,gathernd:61,gaussian:[45,46,47,48,49,50],gaussian_process:[16,61],gaussianmixtur:[33,61],gaussiannb:[33,61],gaussianprocessclassifi:[33,61],gaussianprocessorregressor:[1,27],gaussianprocessregressor:[16,33,61],gaussianrandomproject:61,gb:[29,30],gcv_mode:61,gdpi:[5,6,7,10,12,13,14,17,22,24,25,26,54],ge_gemm:37,ge_gemmcst1:49,ge_gemmcst:37,ge_y0:37,gemm:[33,37,48,49,61],gener:[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,61],genericunivariateselect:61,gensim:26,german:26,germani:26,get:[0,12,13,14,25,26,35,36,37,45,47,59,60,61,65],get_allowed_opt:33,get_devic:2,get_domain_opset:32,get_dummi:53,get_exampl:20,get_first_dimens:[31,45,46,53],get_input:[9,10,15,19,22,40,41,44,56,58],get_latest_tested_opset_vers:0,get_model_alia:[12,13,14,31,49,53],get_modelmeta:20,get_nam:0,get_opt:[43,48],get_output:[9,11,15,19,22,33,35,38,41,44,56,58],get_param:[12,59],get_shape_calcul:[12,13],get_unique_operator_nam:[0,12,13,46],get_unique_variable_nam:[0,13,46],get_xaxi:[29,33,34,37,39,42,44,45,47,48,49,50],get_yaxi:[29,33,34,37,39,42,44,45,47,48,49,50],getlogg:[0,19],getopnodeproduc:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54,60],getpydotgraph:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54,60],getvalu:[45,47,50],geyser:40,gini:61,github:[12,13,14,22,25,26,32,35,40,45,47,56,61,65],githubusercont:[7,17],give:[17,22,29,33,36,50,59,60,61],given:[0,7,13,14,17,22,60,61],global:[3,4,5,34],globalaveragepool:61,globallppool:61,globalmaxpool:61,go:[0,61],goal:[13,14],goe:[15,37,57],golf:40,gong:56,good:[13,14,16,22,29],good_input_t
yp:0,good_output_typ:0,got1:6,got2:[6,39,48],got:[12,15,21,39,43,45,46,47,48,50],got_split:43,gpr:16,gpu:[2,36,56,58],gradient:[21,61],gradientboostingclassifi:[33,61],gradientboostingregressor:[29,30,61],gram:61,graph:[2,4,5,6,8,16,18,19,21,23,28,29,30,32,33,34,36,43,46,48,52,55,57,59,60,61,63,65],graph_def:22,graph_nam:20,graph_nmf:21,graphicallasso:61,graphicallassocv:61,graphproto:[0,22],graphviz:29,graphviz_help:[29,33,34,37,39,42,44,45,47,48,49,50],gray_r:12,great:[14,40],greater:[32,61],greater_equ:61,greaterorequ:61,grid:61,grid_sampl:61,gridsampl:61,gridsearchcv:61,groom:40,ground:61,group:[50,61],grow:[43,66],gru:61,guess:[0,50,61],guess_data_typ:60,guess_numpy_typ:[45,48,49,50,53],guess_proto_typ:[46,50],guess_schema_from_data:39,h:[21,61],h_0:61,h_1:61,h_adapt:61,h_new:61,h_out:61,h_sqrt:61,h_tminus1:61,ha:[0,7,16,17,22,33,37,40,45,47,49,56,59,60,61,64],half:61,half_pixel:61,hand:61,handl:[0,4,7,13,15,17,18,25,31,39,44,46,59,60,61],handle_unknown:[7,17,39,60,61],happen:[6,12,15,32,36,37,45,46,47,48,49,53,66],hard:61,hardmax:61,hardsigmoid:61,hardswish:61,harm:36,hasattr:12,have:[0,12,16,32,33,39,45,46,47,53,54,59,61,64,65],he:[12,21],header:26,height:61,held:39,hello:61,help:17,helper:[0,4,17,18,22,37,38,60],helpgen:[29,33,34,37,39,42,44,45,47,48,49,50],henc:61,here:[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,58,61],heron:40,heterogen:[0,26,61],hidde_s:61,hidden:61,hidden_layer_s:61,hidden_s:61,hide:26,high:[58,61],higher:[15,32,61],highlight:16,hing:61,histgradientboostingclassifi:[33,52,61],histgradientboostingregressor:[33,61],histor:61,histori:61,hold:[0,61],home:[7,17,40],honor:61,hot:61,how:[2,6,11,12,16,18,20,23,24,25,26,31,33,38,40,41,42,43,44,45,47,48,50,51,52,60,61,62,64,65],howev:[36,41,56,59,60,61],ht:61,html:[4,9,18,33,61],http:[4,7,9,12,13,14,17,18,25,29,33,40,45,47,61,65],huberregressor:61,huge:36,hurt:52,hyperbol:61,i0:[10,11],i10:35,i12:35,i14:35,i1:[11,54],i2:[11,54],i:[0,4,6,11,12,17,18,19,21,22,25,26,31,32,35,36,40,41,43,44,47,50,52,61],i_:61,id:[0,5,7,8,11,33,34,35,45,59,61],idea:[36,61],ident:[0,21,45,46,47,48,49,50,61],identif:61,identifi:61,idf:[52,61],idx1:61,idx2:61,idx:61,ieee:61,ieeexplor:61,iforest:[28,55,57,64],ignor:[7,17,35,39,43,52,59,60,61],ignore_index:61,ij:21,illustr:[0,61],iloc:31,im0:40,im2arrai:40,im:40,imag:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54,61],imagebox:12,imagenet:61,imagenet_class:40,imagin:[4,18,61],img:[12,40],imgs_test:12,imgs_train:12,impact:[41,43,59],implement:[0,2,4,5,7,8,10,13,14,17,18,19,21,22,24,25,26,28,30,31,32,33,34,36,42,43,44,48,49,50,51,53,54,55,57,58,59,60,61,62,63,64,65,66],implementor:61,impli:61,implicit:[7,17,60,61],implicitli:61,importance_gett:61,importerror:[15,26,52,53],impos:[16,61],imput:[4,7,17,18,60,61],imputed_valu:61,imputed_value_float:[17,61],imputed_value_int64:61,imread:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54],imshow:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54],includ:[0,12,23,24,25,31,36,42,44,50,51,60,61],include_bia:61,inclus:61,incom:61,incompat:39,increas:[4,9,11,18,33,38,61],increment:61,incrementalpca:61,ind:[24,25,31,42,44,52,60],independ:[60,61],independetli:18,index:[15,31,61],indic:[0,13,14,15,16,21,58,61],indices_shap:61,indices_slic:61,indices_typ:21,individu:[12,29,61],induc:61,inf:[54,61],infer:[0,29,39,50,58,60,61],infer_shap:50,infer_typ:19,inferencesess:[3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,24,25,26,29,30,31,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,
49,52,53,54,56,58,60],infin:[0,61],infinit:[0,54,61],infix:61,info:19,inform:[0,11,18,19,22,35,38,45,47,58,60,61],ingest:[39,50],ingli:56,inherit:[0,10,61],init:[12,29,33,39,49,61],init_1:61,init_n:61,init_param:61,init_s:61,initi:[0,16,17,21,23,32,36,40,58,60,61],initial_c:61,initial_h:61,initial_input:7,initial_state_and_scan_input:61,initial_typ:[0,4,8,9,10,11,16,18,19,26,35,39,41,44,52,53,56,59,60,61],inner:61,innermost:61,inp:40,inplac:[7,17,61],input0:[13,14],input:[0,3,4,6,10,12,13,14,15,16,18,19,21,22,23,24,25,26,28,29,31,32,33,36,37,42,44,45,46,47,48,49,50,52,53,55,57,58,59,60,61,63],input_count_rang:[0,31],input_dim:[31,45,46,47,48,49,50,61],input_dimens:61,input_feature_dimens:61,input_forget:61,input_mean:61,input_nam:[0,9,10,15,19,41,44,56,58],input_rang:50,input_rank:61,input_s:61,input_sequ:61,input_shap:61,input_spatial_shap:61,input_typ:[31,45,46,47,48,49,50],input_var:61,input_x:61,insensit:61,insert:[0,6,40,44,52,61],insert_replac:52,inspect:[12,13,14],inspir:26,instal:[22,29,43,61],instanc:[0,12,20,59,61],instancenorm:61,instead:[15,26,29,35,36,39,52,61],instruct:54,int16:61,int32:61,int64:[6,7,13,14,15,17,18,21,31,33,35,36,37,38,41,50,53,61],int64_data:[17,32],int64_input:0,int64tensortyp:[0,7,13,14,17,19,41,53],int8:61,int_max:61,int_min:61,integ:[0,11,25,31,33,35,36,39,44,54,59,61],integr:61,intend:61,interaction_onli:61,intercept:[17,61],intercept_sc:61,interest:[12,61],interior:61,intermedi:[0,1,12,13,27,28,38,55,57,60,61,63],intermediate_zero_point:61,intern:16,interpol:61,interpret:40,intersect:61,interv:[31,54,61],introduc:[6,29,36,43,52,60],introduct:56,inv_kw_arg:61,inv_std_:12,invalid:[15,61],invalid_argu:15,invalidargu:[15,40],invari:61,invers:[12,45,46,47,48,49,50,61],inverse_func:61,inverse_indic:61,inverse_transform:21,invert:61,investig:[1,27,28,29,55,57,63],invok:[0,39],involv:[9,16,50,57,60,61],invscal:61,io:[40,45,47,50],iof:61,iofc:61,iou:61,iou_threshold:61,ipynb:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54],ir_vers:[3,17,19,20,21,22,23,32],iri:[8,9,11,15,19,31,33,35,41,52,56,58],is_case_sensit:61,is_data_valid:61,is_deprec:50,is_evalu:19,is_f:19,is_leaf:19,is_model_valid:61,is_root:19,is_test:61,isinf:61,isinst:33,isnan:[31,61],isol:60,isolationforest:[32,33,61],isoton:61,isotonicregress:61,isra:26,issu:[6,13,14,25,28,37,43,44,45,47,53,55,56,57,59,63,65,66,67],item:[0,12,31,33,61],iter:[4,9,18,19,33,61],iterated_pow:61,itermediate_zero_point:61,its:[0,3,5,11,12,22,23,33,34,35,43,50,59,60,61,64,65],itself:61,ivan:56,j:[21,61],j_1:61,j_2:61,j_:61,j_k:61,jew:26,jiang:56,jitter:61,jmlr:61,johnson:61,jointli:61,jpg:40,jupyt:[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54],just:[12,21,33,49,50,61],k1:61,k2:61,k:[7,12,17,21,32,33,36,43,50,61],k_mean:46,kakato:40,kbinsdiscret:61,keep:[12,20,33,36,37,38,52,60,61],keep_empty_str:33,keep_tsne_output:12,keepdim:[13,14,45,46,48,49,50,61],keepgo:61,keepgoing_in:61,keepgoing_out:61,kei:[0,26,36,39,61],keith:26,kera:40,kernel:[16,61],kernel_approxim:61,kernel_param:61,kernel_ridg:61,kernel_shap:61,kernel_spatial_shap:61,kernel_typ:61,kernelcenter:61,kerneldens:61,kernelpca:[33,61],kernelridg:61,key_typ:[17,19],keys_:61,keys_int64:32,keys_str:61,kh:61,ki:37,kind:[15,33,60,61],km:37,kmean:[33,37,61],kn:61,kneighborsclassifi:[33,61],kneighborsregressor:[12,33,61],kneighborstransform:[33,61],knn_op:12,knn_output:12,knn
imput:[33,61],know:[7,13,14,16,17,24,25,34,42,44,45,46,47,50,60],known:[50,58,60,61],kr:37,kv:37,kw:61,kw_arg:61,kwarg:[10,12,50,52,61],l1:[17,61],l1_ratio:[38,61],l2:61,l2_regular:61,l:[17,41,45,46,48,49,50,61],l_1:61,l_2:61,l_i:61,l_n:61,lab_pr:53,label:[2,7,11,13,14,15,17,19,25,32,33,35,36,37,38,39,40,41,44,45,49,53,54,56,61],label_encoder_deprecation_msg:[25,44],label_nam:[9,19,44,56,58],labelbinar:61,labelencod:[32,61],labelpropag:61,labelspread:61,lakeshor:40,lakesid:40,lambd:61,lambda:[11,35,43],lambda_1:61,lambda_2:61,lambda_init:61,languag:[30,45,47],lar:61,larger:61,largest:61,larscv:61,lasso:61,lassocv:61,lassolar:61,lassolars:61,lassolarscv:61,last:[0,7,17,32,33,37,38,40,50,56,59,60,61,66],latentdirichletalloc:61,latest:[0,32],latter:61,layer:[37,61],layout:61,lbfg:[4,7,9,17,18,33,60,61],lead:61,leaf:[19,32,61],leaf_siz:61,leakag:61,leakyrelu:61,learn:[0,2,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,28,30,31,32,33,35,36,37,42,44,45,47,50,52,53,55,57,58,59,60,63,64,65],learner:[25,44,52],learning_r:61,learning_rate_init:61,least:[10,16,32,61],leav:[36,61,66],ledoitwolf:61,left:[36,43,54,61],leftright:54,legaci:61,legend:36,len:[4,13,14,16,18,26,31,32,33,40,53,57,61],length:61,length_origin:61,length_res:61,length_scal:16,leqslant:36,less:[32,46,61],less_equ:61,lessorequ:61,let:[0,2,3,4,5,6,7,8,10,12,13,14,16,17,18,20,21,22,24,25,26,31,32,33,34,36,39,40,42,43,44,45,46,47,48,49,52,58,59,60,61],letter:61,level:[0,19,58],level_3:61,leverag:[10,47],lgbm:[24,25,42],lgbmclassifi:52,li:56,lib:[4,9,18,25,33,38,44],liblinear:[13,14,41],librari:[0,13,14,24,25,32,38,40,42,43,44,45,46,47,56,57,59,60,63,65],libsvm:56,licens:56,light:40,lightgbm:[1,27,28,52,53,55,56,57,60,62],lightgbmlgbmclassifi:[24,33,42,52],lightgbmlgbmregressor:[33,43],lighthous:40,like:[0,13,14,17,21,25,38,44,49,52,61],likelihood:61,likewis:61,limit:[4,9,18,26,33,61],linalg:[45,46,48,49,50],line:[7,16,17,26,32,50,59,60],linear:61,linear_before_reset:61,linear_model:[2,4,7,8,9,11,13,14,15,17,18,26,29,30,33,35,38,41,58,60,61,65],linearclassifi:[17,33,41,61],lineardiscriminantanalysi:61,linearregress:[29,30,61],linearregressor:61,linearsvc:[26,33,61],linearsvr:61,link:[13,61],linv:[45,46,48,49,50],list:[0,1,7,8,11,17,22,26,27,28,32,35,39,40,43,46,50,52,55,57,59,60,61,63,66,67],liter:61,live:61,live_decorrelate_transformer_convert:50,live_decorrelate_transformer_shape_calcul:50,livedecorrelatetransform:50,lixin:56,ll:58,ln:61,load:[0,2,15,18,20,23,45,47,50],load_boston:16,load_diabet:[29,44],load_digit:[4,12,18],load_iri:[2,5,8,9,11,13,14,15,19,24,25,31,33,34,35,37,38,41,42,44,45,46,47,48,49,50,52,56,58],load_onnx_model:[0,17],local:[12,13,61,65],localoutlierfactor:[33,61],locat:61,log:[1,27,32,61],log_prob:61,logger:[0,19],logi:30,logic:[61,65],logist:[2,4,9,15,17,18,20,33,61],logisticregress:[0,2,4,7,8,9,11,13,14,15,17,18,26,33,35,38,41,58,59,60,61,65],logisticregressioncv:61,logreg:[2,26],logreg_iri:[2,9,15,20,58],logsoftmax:61,logx:30,longer:[3,50],look:[0,4,7,13,14,15,16,17,18,21,22,23,29,31,36,39,49,52,56,61,65],lookup:61,loop:[60,61],loss:[12,61],loss_:12,lost:35,low:61,lower:[0,61],lowercas:61,lowest:61,lp:[50,61],lpnormal:61,lppool:61,lr:[29,30],lrn:61,lstm:61,ly:61,m:[21,36,56,57,61],m_:21,ma_matmulcst:49,machin:[0,10,12,22,32,50,58,60,61,63],macro:26,made:[16,54],magnitud:[16,36],mai:[0,5,6,7,8,11,13,14,16,19,21,25,29,30,32,33,35,36,37,39,43,44,45,46,47,50,52,54,59,60,61,65,66,67],main:[0,32,40,61],maintain:[61,64],mainten:64,major:[36,38,61],make:[0,2,6,22,26,33,45,47,60,61],make_blob:32,make_cla
ssif:44,make_graph:22,make_model:22,make_nod:22,make_pipelin:[10,52],make_regress:[6,30,36],make_tensor_value_info:22,male:17,mandatori:[0,50,58,61,66],mani:[5,11,15,16,24,25,28,29,34,35,36,37,42,44,47,55,57,58,60,61,63,64],manifold:12,manual:[0,59,61],manufactur:40,map:[0,11,19,31,35,59,60,61],map_form:61,map_typ:[17,19],margin:[29,61],mask:61,master:[12,61],mat:21,matantsev:56,match:61,materi:0,math:6,mathbb:12,matmul:[46,48,61],matmulinteg:61,matplotlib:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,32,36,40,43,54],matric:[16,21,28,35,45,46,47,48,49,50,55,57,61,67],matrix:[7,8,11,12,16,17,21,26,35,39,40,45,46,47,48,49,50,52,58,61],matter:61,max:[3,6,12,13,14,16,29,32,36,37,43,45,46,47,48,49,50,53,57,61],max_bin:61,max_depth:[6,33,36,52,61],max_df:61,max_exec:[3,30,48],max_featur:61,max_fun:61,max_it:[4,8,9,11,18,33,35,38,52,60,61],max_iter_predict:61,max_leaf_nod:61,max_map:61,max_n_alpha:61,max_no_improv:61,max_output_boxes_per_class:61,max_sampl:61,max_sequence_length:61,max_skip:61,max_subpopul:61,max_trial:61,max_trip_count:61,maxabsscal:[33,61],maxdiff:6,maxim:[0,61],maximum:[6,43,56,61],maxpool:61,maxroipool:61,maxunpool:61,mb:[27,55],md1:6,md2:6,md:[12,22,61],mean:[0,3,10,12,13,14,19,25,33,36,38,45,46,47,48,49,50,54,60,61,63,65],mean_:[12,45,46,47,48,49,50],mean_nam:46,mean_ob:30,mean_precision_prior:61,mean_prior:61,mean_squared_error:12,meaning:61,means_init:61,meanshift:61,meant:[29,61,65],meanvariancenorm:61,measur:[13,14,36,61],measure_tim:[3,30,48],mechan:[11,16,35,58,59,60,61,64],median:[7,17,60],member:[12,50],memori:[38,61],mention:[13,14],merg:[7,17,36],merged_column:17,merror:[25,44,52],messag:[0,19,25,35,44,61,65],met:61,meta:20,metadata:[1,27],metadata_prop:20,meth:12,method:[0,4,8,12,13,14,16,18,37,50,59,60,61],metric:[3,12,25,26,44,52,61],metric_param:61,microsoft:[3,26,56],might:[12,15,33,45,46,53,60],mimic:61,min:[3,12,16,36,37,61],min_:53,min_categori:61,min_df:[26,61],min_exec:[3,30,48],min_features_to_select:61,min_impurity_decreas:61,min_sampl:61,min_samples_leaf:61,min_samples_split:61,min_siz:40,min_weight_fraction_leaf:61,mincovdet:61,mini:61,minibatchdictionarylearn:61,minibatchkmean:[33,61],minibatchsparsepca:61,minim:0,minimum:61,minkowski:61,minmaxscal:[33,53,61],minut:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54],mirror:[40,61],misalign:61,misc:26,mismatch:[37,60],miss:[5,7,17,35,52,61,65],missing_valu:61,missingind:61,missingshapecalcul:60,misspel:15,mistak:[15,60],mix:[7,17,40,60],mixtur:[5,34,61],mixture2:5,ml:[0,4,7,12,17,18,19,21,24,25,32,36,42,43,44,50,52,53,56,57,60,61,66],mlinsight:[12,39,40],mlmodel:12,mlogloss:[25,44,52],mlpclassifi:[33,61],mlpregressor:[12,61],mlprodict:[29,30,33,34,36,37,39,40,42,44,45,47,48,49,50,51,57],mmust:[12,13,14],mobil:40,mod:[21,57,61],mode:[22,59,61],model1:[6,53],model2:[6,36],model3:36,model:[0,1,2,4,6,7,15,17,18,20,22,26,27,28,30,31,32,34,36,37,42,43,49,50,51,52,55,57,59,60,62,63,64,66],model_byt:40,model_def:[22,33],model_id:0,model_nam:[0,40],model_onnx2:[5,34],model_onnx:[0,4,5,7,12,13,14,17,18,21,24,25,26,34,40,42,43,44,52,59,60],model_onnx_split:43,model_onx:36,model_select:[5,6,7,8,9,11,12,13,14,16,17,19,29,30,33,34,35,36,38,41,44,56,58,61],model_typ:0,model_vers:[3,17,19,20,21,22,23,32],modelcomponentcontain:[0,59],modelproto:[0,22,23],modifi:[0,4,17,18,28,35,55,57,60,63],modul:[0,4,9,18,25,29,33,44,53,58],modulu:61,momentum:61,monotonic_cst:61,more:[7,16,17,21,37,43,46,50,51,52,57,58,61],moreov:61,most:[0,7,17,
22,23,32,34,36,45,46,53,56,59,60,61],mostli:61,move:[32,43,61],mt2:[30,48],mt:[30,48],mu_c0:[21,23,33,37],mu_mul:[21,23,37],mu_mulcst:[23,33,37],much:[5,11,16,30,34,35,43,52,61],mul:[21,23,32,37,61],multi:[25,44,52,61],multi_class:[17,61],multiclass:61,multidirect:61,multilabelbinar:61,multinomi:61,multinomialnb:[33,61],multioutput:[35,61],multioutputclassifi:[33,61],multioutputregressor:61,multipl:[2,6,12,15,39,43,59,60,61],multipli:61,multitaskelasticnet:61,multitaskelasticnetcv:61,multitasklasso:61,multitasklassocv:61,must:[0,4,6,7,13,14,17,18,25,32,41,45,47,50,60,61,65,66],mutual:61,mx:[13,14,57],mxnet:56,my_loc:61,mylogisticregress:65,myscal:60,n0_cast:33,n0_linearclassifi:33,n0_sub:49,n0_treeensembleregressor:29,n0_treeensembleregressordoubl:36,n1_gemm:49,n1_mul:33,n1_normal:33,n1_treeensembleregressor:29,n2_add:33,n2_cast:33,n2_linearregressor:29,n2_matmul:49,n3_linearclassifi:33,n3_mul:29,n3_zipmap:33,n4_mul:29,n4_normal:33,n5_mul:29,n6_flatten:29,n7_flatten:29,n8_flatten:29,n9_sum:29,n:[3,12,13,14,17,22,26,29,30,36,37,40,43,45,47,50,53,59,61],n_:12,n_alpha:61,n_bin:61,n_class:[12,44],n_cluster:[10,37,61],n_compon:[12,21,26,60,61],n_estim:[24,25,29,32,33,42,43,44,52,53,61],n_featur:[12,30,32,44],n_features_to_select:61,n_inform:44,n_init:61,n_iter:61,n_iter_i:[4,9,18,33],n_iter_no_chang:61,n_job:[53,60,61],n_neighbor:61,n_nodes_ptr:33,n_nonzero_coef:61,n_op:19,n_restarts_optim:61,n_sampl:[12,32,44],n_subsampl:61,n_support:61,n_target:[12,32],n_var:19,nai:[17,36,63],naive_bay:61,name:[0,2,3,5,6,7,9,10,11,12,13,14,15,17,19,21,22,23,24,25,26,32,33,35,37,38,39,40,44,46,50,53,54,56,57,58,59,60,61],nan:[17,31,33,37,61],nan_euclidean:61,natur:61,nb:36,nchw:61,ncol:3,nd:17,ndarrai:[11,35],ndim:61,ndindex:61,nearest:[12,61],nearest_mod:61,nearestcentroid:61,nearestneighbor:[33,61],necessari:[0,12,59,61],necessarili:[15,22,61],need:[0,3,4,6,7,10,11,12,13,14,16,17,18,21,25,29,30,31,32,33,36,37,39,40,44,45,46,47,48,49,50,51,52,58,59,60,61,65,66],neg:[32,33,61],neg_label:61,negat:61,negativeloglikelihoodloss:61,neighbor:[12,61],neighborhood:61,neighborhoodcomponentsanalysi:61,neighbour:12,neightbour:12,neither:[1,27,61],nest:61,nesterov:61,nesterovs_momentum:61,net:61,net_draw:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54,60],network:[37,40,61],neural:[37,40,61],neural_network:61,never:[38,61],new_axi:61,new_onx:38,newer:33,next:[6,16,36,38],nf:50,nf_:50,ngram:61,ngram_count:61,ngram_index:61,ngram_rang:61,nicer:22,nif:26,nimpli:26,nip:61,nllloss:61,nltk:26,nmf:[1,27,50,51,61],nmf_to_onnx:21,nn:61,nnazi:26,nocl:[24,25,33,42,44,52,59],node1:22,node2:22,node:[3,6,12,16,17,19,21,22,23,32,36,40,43,45,47,50,56,60,61,63,66],node_def:22,node_produc:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54,60],node_sample0_cast:32,node_sample0_gath:32,node_sample0_gathercst:32,node_sample0_labelencod:32,node_sample0_output02:32,node_sample0_output0:32,node_sample0_reshap:32,node_sample0_reshaped0:32,node_sample0_treeensembleregressor:32,node_sample0_y02:32,node_sample0_y0:32,node_sample1_cast:32,node_sample1_gath:32,node_sample1_labelencod:32,node_sample1_output02:32,node_sample1_output0:32,node_sample1_reshap:32,node_sample1_reshaped0:32,node_sample1_treeensembleregressor:32,node_sample1_y02:32,node_sample1_y0:32,node_sample2_cast:32,node_sample2_gath:32,node_sample2_labelencod:32,node_sample2_output02:32,node_sample2_output0:32,node_sample2_reshap:32,node_sample2_reshaped0:32,node_sample2_treeensembleregressor:32,node_sample2_y02:32,node_sample2_y0:32,nodearg:40,nodeproto:[0,22],nodes_:61,nodes_falsenodeid:[19,32],node
s_featureid:[19,32],nodes_hitr:[19,32],nodes_missing_value_tracks_tru:[19,32],nodes_mod:[19,32],nodes_nodeid:[19,32],nodes_treeid:[19,32],nodes_truenodeid:[19,32],nodes_valu:[19,32],nodes_x:61,nogemm:49,non:[45,46,47,48,49,50,61],none:[0,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,21,22,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,52,53,54,56,58,59,60,61],nonmaxsuppress:61,nonzero:61,noop_with_empty_ax:61,noqa:[5,6,10,22,24,25,31,42,43,54],nor:[1,27,61],norm:[17,33,61],norm_coeffici:61,norm_coefficient_post:61,norm_ord:61,normal:[12,17,36,41,61],normalize_i:61,notat:[50,61],note:[0,59,61],notebook:[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54],noth:[0,50,61],notic:61,notimplementederror:[29,33,36,39,45,46,48,49,50],notset:61,novelti:61,now:[0,12,13,14,16,35,36,50,58,61],np:[2,3,4,5,6,7,10,13,14,15,17,18,21,22,26,31,53,54,61],nrow:3,nso:26,nstandardscal:12,nu:61,num1:39,num2:39,num:[7,17,60],num_batch:61,num_class:[25,44,61],num_direct:61,num_onnx:17,num_roi:61,num_scan_input:61,num_selected_indic:61,number:[0,3,4,5,9,11,12,13,14,15,18,28,30,31,33,34,35,40,43,45,47,48,50,53,55,56,57,58,59,60,61,63,65],number_of_el:61,numer:[17,58,61],numeric_featur:[7,17,60],numeric_limit:61,numeric_transform:[7,17,60],numfeat:60,numpi:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,56,57,58,60,61,65],numt:17,numx:17,nusvc:61,nusvr:61,nvidia:61,nxd:61,nyou:36,nyour:26,nystroem:61,o:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,32,33,38,40,41,54,60,61],oa:61,ob:[30,48,52],obelisk:40,object:[0,7,12,17,19,25,26,36,39,40,44,52,60],observ:[11,12,13,14,30,35,43,45,46],obstacl:36,obtain:61,obviou:0,obvious:[12,50],occur:61,occurr:61,odd:61,off:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54],off_valu:61,offici:[50,66],offset:[12,17,40,61],offsetbox:12,offsetimag:12,often:[37,40,50,61],oinf5:36,oinf:[29,30,33,34,37,39,42,44,45,47,48,49,50],ok:[39,50],old:[25,44,52],older:32,omit:[54,61],oml_vers:43,on_valu:61,onc:[12,16,38,40,60,61,66],one:[0,3,4,5,7,10,11,12,13,14,16,17,18,19,22,23,26,28,30,31,32,33,35,36,37,39,43,45,46,49,50,51,52,54,55,56,57,58,59,60,61,63],one_class:61,one_vs_rest:61,oneclasssvm:61,onehot:[7,17,31,39,54,60,61],onehotencod:[0,7,17,39,60,61],onehotencoder1:17,onehotencoder2:17,ones:[3,43,61],onevsoneclassifi:61,onevsrestclassifi:[33,61],onli:[0,5,12,15,24,25,26,31,32,34,36,38,39,42,44,49,50,54,56,59,61,64,65],onnx:[1,5,6,8,11,15,20,27,28,31,33,34,35,36,37,42,43,44,51,52,53,55,57,59,60,62,63,64,65],onnx_byt:36,onnx_cdist:59,onnx_conv:[36,39],onnx_convert:61,onnx_fct:23,onnx_file_or_byt:0,onnx_help:[0,17,38],onnx_nam:[0,12,19],onnx_nod:50,onnx_op:[10,13,14,21,22,23,31,45,46,48,49,50,53,61],onnx_oper:[14,31,47,53],onnx_operator_mixin:10,onnx_opset_vers:[0,32,56],onnx_output:[4,18,37,60],onnx_pars:61,onnx_pr:21,onnx_proto:[13,14,53],onnx_shape_calcul:[10,61],onnx_step:[4,18,37,60],onnx_typ:0,onnxadd:[23,50,53],onnxarrayfeatureextractor:21,onnxcast:[14,31,50,53],onnxcdist:3,onnxclip:53,onnxconcat:53,onnxconverter_common:[4,13,18,46],onnxcustom:50,onnxdiv:[10,50],onnxeig:50,onnxeyelik:50,onnxgatherel:50,onnxgemm:[48,49],onnxgreat:14,onnxident:[14,53],onnxinfer:[29,30,33,34,36,37,39,42,44,45,47,48,49,50],onnxless:53,onnxmatmul:[45,48,49,50],onnxml:20,onnxmltool:[20,24,25,42,43,44,52,56,57,58,62],onnxmul:[21,23,50,53],onnxoper:50,onnxoperatormixin:[0,10,50,61],onnxpad:22,onnxpipelin:36,o
nnxpow:50,onnxreducemax:14,onnxreducemean:50,onnxreducesum:21,onnxrt:[29,30,33,34,36,37,39,42,44,45,47,48,49,50],onnxruntim:[0,1,2,4,5,6,7,8,9,11,12,13,14,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,52,53,54,55,56,57,58,59,60,61,66],onnxruntime1:40,onnxruntime_pybind11_st:[15,24,39,40],onnxruntimeerror:15,onnxshap:50,onnxsub:[10,45,48,49,50],onnxsubestim:[14,31,47,53],onnxtransform:[36,40],onnxtranspos:[22,50],onto:0,onx1:[6,54],onx2:[6,8,11,35,36,39,48,54],onx3:[11,35,36],onx4:[35,36],onx5:35,onx64:16,onx64_std:16,onx6:[35,36],onx:[3,8,9,10,11,16,19,21,29,30,32,35,36,37,38,39,41,44,45,46,47,48,49,50,53,56,58],onx_label:53,onx_pr:[4,18],onx_proba:53,onxinf:54,oob_scor:61,op:[3,12,13,14,19,31,45,46,47,48,49,50,53,60,61],op_domain:[0,12],op_map:31,op_nam:[46,50],op_typ:[0,3,17,19,21,22,23,32],op_vers:[0,3,10,14,21,22,23,26,31,45,47,48,49,50,53,61],opeig:50,open:[2,7,9,12,15,17,18,23,24,25,26,38,40,42,44,52,54,56,58],openmp:30,oper:[0,1,3,4,10,11,12,13,14,16,17,18,19,26,27,28,31,32,33,35,36,37,43,45,46,47,48,49,51,53,55,56,57,58,59,60,63,66],operand:61,operator_convert:[0,24,25,42,43,44,52,59,60],operator_nam:[46,50],oppos:52,opposit:61,opruncustom:50,ops_cpu:50,opset:[0,19,28,45,46,47,50,55,56,57,61,63],opset_import:[3,17,19,21,22,23,32],opset_ml:32,opt:33,optic:61,optim:[6,30,33,50,58,59,60,61],optimis:[11,35],option:[0,4,5,6,7,8,9,11,14,15,16,17,18,24,25,26,28,30,34,36,37,38,39,41,42,43,44,49,50,52,55,56,57,60,61,63,65],optionalgetel:61,optionalhasel:61,opv:[10,14,31,45,47,48,49,50,53],ord_onx:31,ordenc_to_sklearn:31,order:[16,36,41,59,60,61],ordin:31,ordinal_encod:31,ordinal_encoder_convert:31,ordinal_encoder_shape_calcul:31,ordinalencod:61,org:[4,9,18,29,33,40,61],origin:[17,21,26,31,59,60,61],original_model:22,ort2:36,ort3:36,ort4:36,ort5:36,ort6:36,ort:[3,22,30,36],ortfail:24,orthogon:61,orthogonalmatchingpursuit:61,orthogonalmatchingpursuitcv:61,os:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,40,54,60],ot:61,other:[2,12,15,18,22,23,28,35,36,38,46,54,55,56,57,58,59,60,61,65],otherwis:[0,3,61],ouput:50,our:[0,61,62],out:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,52,53,54,61],outcom:[61,65],outer:61,outgo:61,outlier_label:61,output:[0,1,3,6,7,10,12,13,14,15,16,18,19,21,22,23,27,28,31,32,33,36,37,40,43,45,46,47,48,50,53,54,55,57,59,60,61,63,65],output_class_label:[0,33],output_count:61,output_count_rang:[0,31],output_dim:61,output_dimens:61,output_half_pixel:61,output_height:61,output_i:61,output_indic:61,output_inverse_indic:61,output_label:[11,17,19,33,35],output_nam:[0,3,10,14,15,21,22,23,31,40,41,45,47,48,49,50,53],output_pad:61,output_prob:[17,19,33],output_rang:50,output_rank:61,output_s:61,output_sequ:61,output_shap:61,output_spatial_shap:61,output_typ:[31,45,46,47,48,49,50],output_width:61,outputcodeclassifi:61,outsid:61,over:[0,61],overcom:36,overflow:61,overlap:61,overload:[37,61],overwrit:[0,37,50,61],overwritten:0,ovr:61,own:[1,13,14,22,26,27,62,64],p1:[29,36,45,46,47,48,49,50],p2:[29,36,45,46,47,48,49,50],p:[12,41,45,46,48,49,50,61],pa_pad:22,pack:61,packag:[0,2,4,9,18,22,23,25,33,38,44,46,50,56,60,61,64,65],pad:[22,40,61],pad_shap:61,padding_mod:61,page:30,pai:[45,47],pair:61,pairwis:[3,59],pale:40,panda:[3,4,7,17,18,30,39,43,48,52,53,54],paper:61,par:12,parallel:[0,30,61],param:[0,12,44,45,46,47,48,49,50,61],param_grid:61,paramet:[0,5,6,10,12,13,14,16,31,32,34,41,43,46,50,56,59,61],parch:[7,17],parent:[19,61],park:40,pars:[0,19],pars
e_sklearn:0,parse_sklearn_model:[0,19],parsefromstr:23,parser:[28,31,53,55,57,61,65],parser_fct:0,part:[4,7,12,17,18,26,33,50,59,61,62,63,66],partial:61,partial_fit:53,particular:[0,3,16,61],partit:26,pass:[25,26,44,52,60,61,65],passiveaggressiveclassifi:61,passiveaggressiveregressor:61,passthrough:[31,39,61],past_vers:50,patchwork:47,path:[6,33,40],path_length0_cast:32,path_length0_labelencod:32,path_length0_output0:32,path_length0_reshap:32,path_length0_reshapecst:32,path_length0_reshaped0:32,path_length0_y0:32,path_length1_cast:32,path_length1_labelencod:32,path_length1_output0:32,path_length1_reshap:32,path_length1_reshaped0:32,path_length1_y0:32,path_length2_cast:32,path_length2_labelencod:32,path_length2_output0:32,path_length2_reshap:32,path_length2_reshaped0:32,path_length2_y0:32,pattern:59,pb:61,pca:[4,12,18,47,59,61],pca_:47,pclass:[7,17],pclassout:17,pd:[4,7,17,18,53,54],pdf:61,pe:12,pedest:40,peephol:61,pei:56,penalti:[38,61],per:[30,35,43,54,61],percentil:61,perceptron:61,perfect:36,perform:[58,61],perhap:26,perm:[22,61],permut:61,pf:61,pformat:33,pharo:40,phase:[0,61],pi:[6,61],pickabl:33,picket:40,pickl:[45,47,50],piec:[13,14,60,63],pil:40,pipe2:40,pipe:[4,18,24,25,33,37,39,40,42,44,52,59],pipelin:[0,1,9,12,13,14,19,27,28,39,45,47,52,55,56,57,58,62,63,65],pipeline2dot:39,pipeline_lightgbm:[24,42],pipeline_onnx_mixin:10,pipeline_tfidf:26,pipeline_titan:[7,17],pipeline_titanic_nozipmap:7,pipeline_titanic_num:17,pipeline_titanic_numer:17,pipeline_titanic_text:17,pipeline_titanic_textu:17,pipeline_transpose2x:22,pipeline_tsn:12,pipeline_xgboost:[25,44],pixel:61,pkl:18,pl:40,place:61,plai:[1,10,14,27,61],plain:61,plan:21,platform:[29,40,61],pleas:[4,9,15,18,33,61],plinth:40,plot:[3,12,29,30,32,36,39,40,43,46],plot_abegin_convert_pipelin:[29,55],plot_backend:[2,27],plot_bbegin_measure_tim:[30,55],plot_benchmark_cdist:[3,27],plot_benchmark_pipelin:[4,27],plot_black_op:[5,27],plot_cast_transform:[6,27],plot_catwoe_transform:[31,55],plot_cbegin_opset:[32,55],plot_complex_pipelin:[7,27],plot_convert_decision_funct:[8,27],plot_convert_model:[9,27],plot_convert_syntax:[10,27],plot_convert_zipmap:[11,27],plot_custom_model:[12,27],plot_custom_pars:[13,27],plot_custom_parser_altern:[14,27],plot_dbegin_opt:[33,55],plot_dbegin_options_list:[34,55],plot_dbegin_options_zipmap:[35,55],plot_ebegin_float_doubl:[36,55],plot_embed:12,plot_errors_onnxruntim:[15,27],plot_fbegin_investig:[37,55],plot_gallery_imag:40,plot_gbegin_cst:[38,55],plot_gbegin_datafram:[39,55],plot_gbegin_transfer_learn:[40,55],plot_gconvert:[41,55],plot_gexternal_lightgbm:[42,55],plot_gexternal_lightgbm_reg:[43,55],plot_gexternal_xgboost:[44,55],plot_gpr:[16,27],plot_graphviz:[29,33,34,37,39,42,44,45,47,48,49,50],plot_icustom_convert:[45,55],plot_intermediate_output:[17,27],plot_investigate_pipelin:[18,27],plot_jcustom_syntax:[46,55],plot_kcustom_converter_wrapp:[47,55],plot_lcustom_opt:[48,55],plot_log:[19,27],plot_mcustom_pars:[49,55],plot_metadata:[20,27],plot_nmf:[21,27],plot_onnx_oper:[22,27],plot_pextend_python_runtim:[50,55],plot_pipelin:[23,27],plot_pipeline_lightgbm:[24,27],plot_pipeline_xgboost:[25,27],plot_qextend_onnxruntim:[51,55],plot_tfidfvector:[26,27],plot_usparse_xgboost:[52,55],plot_wext_pyod_forest:[53,55],plot_woe_transform:[54,55],plscanon:61,plsregress:61,plssvd:61,plt:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,32,36,40,43,54],plu:61,plus2_0_c0:32,plus2_0_cast:32,plus2_0_great:32,plus2_0_output0:32,plus2_1_c0:32,plus2_1_cast:32,plus2_1_great:32,plus2_1_output0:32,plus2_2_c0:32,plus2_2_cast:32,plus2_2_great:
32,plus2_2_output0:32,pn:12,png:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54],po:61,point:[0,12,40,61],poissonregressor:61,poli:61,polici:26,polynomialcountsketch:61,polynomialfeatur:61,pool:[0,61],pool_int64:61,pool_str:61,pooled_shap:61,popul:[37,61],pos_label:61,posit:61,possibl:[0,19,22,28,31,34,36,38,39,41,55,57,58,59,60,61,63,66],post:[26,41,61],post_transform:[17,19,32,61],poster:26,potenti:61,pow:[32,61],power:61,power_t:61,powertransform:61,pprint:[3,7,16,17,33,38,39],pr:[59,65],prabhat:56,prai:26,prd:54,pre_dispatch:61,preced:0,precis:[16,26,61],precisions_init:61,precomput:61,precprocessor:60,pred:[12,13,14,21,45,46,47,48,49,50],pred_onx64:16,pred_onx64_std:16,pred_onx:[7,9,12,16,17,19,24,25,26,42,44,52,56,58],pred_ort:29,pred_pyrt:29,pred_skl:[16,29],pred_std:16,predect:40,predict:[1,2,4,6,12,13,14,15,16,18,21,22,26,27,30,32,33,36,37,38,43,45,49,50,51,53,56,58,60,61,63],predict_add:32,predict_addcst:32,predict_c01:32,predict_c0:32,predict_cast:32,predict_less:32,predict_mul:32,predict_mulcst:32,predict_onnx:21,predict_output0:32,predict_proba:[4,7,8,13,14,17,18,24,25,26,37,39,42,44,52,53],predict_with_onnxruntim:[10,22],predictable_tsn:12,predictable_tsne_convert:12,predictable_tsne_shape_calcul:12,predictabletsn:12,predictor:[13,14,21,25,45,47,52,60,65],prefit:61,prefix:[0,12,26,41,50,60,61],prelu:61,prepar:2,prepend:61,preprocess:[4,6,7,9,17,18,22,24,25,31,33,36,37,39,40,42,44,45,47,52,53,60,61],preprocessor:[7,17,39,60,61],present:[52,61],preview:61,previou:[16,32,36,50,52,58,59,60,61],previous:61,print:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,52,53,54,56,57,60,61,65],prior:61,prob:[40,61],prob_a:61,prob_b:61,proba:2,probabili:[11,33,35],probabilit:33,probability_tensor:[17,33,41],probabl:[1,2,7,13,14,15,17,19,27,33,35,36,38,39,40,41,45,49,53,59,61],probil:39,probit:61,problem:[35,60],process:[12,19,35,41,45,46,48,52,60,61],produc:[0,4,5,7,11,12,13,14,15,17,18,20,26,31,33,34,35,36,37,38,40,49,50,59,60,61,63],producer_nam:[3,17,19,20,21,22,23,32],producer_vers:[3,17,19,20,21,22,23,32],product:[20,21,29,61],program:[4,9,18,25,29,33,38,44,61],progress:[0,26,59],proj2:40,proj:40,project:[13,14,25,40,45,47,65],projet:40,promot:38,proper:58,properti:0,propos:[12,14,40,59,61],proposed_nam:41,proto:[13,14,22,53],proto_dtyp:[46,50],protobuf:22,provid:[0,58,61],prune:61,pseudo:61,pseudocod:61,pt:12,ptsne_knn:12,purpos:[33,60],put:0,py:[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,60],pydot_graph:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54,60],pyod:[28,55,57,64],pyod_iforest_convert:53,pyod_iforest_pars:53,pyod_iforest_shape_calcul:53,pyodiforest:53,pyplot:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,32,36,40,43,54],pyquickhelp:[29,33,34,37,39,42,44,45,47,48,49,50,57,65],pyre2:59,pyrene:40,pyrt:30,python39:[4,9,18,25,33,38,44],python:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,23,24,25,26,28,30,31,32,33,34,35,36,38,39,40,41,42,43,44,45,46,47,48,49,51,52,53,54,55,57,59,61,66],python_compil:[29,30,33,36,49],pythonapioverview:22,pytorch:[40,61],pytorch_half_pixel:61,pyx:36,q:[17,61],qlinearconv:61,qlinearmatmul:61,qmax:61,qmin:61,quadraticdiscriminantanalysi:61,qualifi:26,quantil:61,quantile_rang:61,quantileregressor:61,quantiletransform:61,quantiz:61,quantizelinear:61,question:56,quit:[12,16,22,40],r1:14,r2:14,r3:14,r:[12,15,31,32,33,36,40,41,61],r_:12,r_adjust:61,rad:40,radiu:61,radius
neighborsclassifi:[33,61],radiusneighborsregressor:[33,61],radiusneighborstransform:61,rais:[0,5,10,12,13,14,15,25,29,31,33,34,36,40,45,46,47,48,49,50,60,61,65],randint:43,randn:[3,43],random:[3,9,19,24,25,30,42,43,44,45,47,52,53,60,61],random_project:61,random_st:[6,12,26,29,30,33,44,53,61],randomforest:[0,60],randomforestclassifi:[9,33,39,52,56,61],randomforestregressor:[29,30,33,61],randomizedsearchcv:61,randomnorm:61,randomnormallik:61,randomst:53,randomtreesembed:61,randomuniform:61,randomuniformlik:61,rang:[0,6,12,21,31,32,36,40,43,47,61],rank:[15,21,61],rankdir:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54,60],ransacregressor:61,rate:61,rather:[58,61],ratio:[43,61],ration:33,ravel:[6,12,29,36,37,43,44,45,46,47,48,49,50,52,53,54],raw:[0,1,7,12,17,27,33,40,61],raw_nam:0,raw_oper:[0,12,13,14,19,31,43,45,46,47,48,49,50,53],raw_scor:[0,8,59],rb:[18,23,40,61],rbb:61,rbbi:61,rbc:61,rbf:[16,61],rbfsampler:61,rbh:61,rbi:61,rbo:61,rbr:61,rbz:61,rc:61,re2:59,re:[3,8,10,11,12,15,21,22,26,31,32,33,35,38,40,43,53,59,61],re_reduced0:37,re_reducesum:21,re_reducesumsquar:37,reaason:37,reach:[4,9,18,33,38],read:[23,40,44,61],read_csv:[7,17],readi:[7,17,26],real:16,realli:[7,17,33],rearrang:61,reason:[16,37,61],reassignment_ratio:61,rec:21,recal:26,receiv:60,recent:[0,7,32,56],reciproc:61,recommand:21,recommend:61,record:52,rectifi:61,recurr:61,recurs:60,redict:29,reduc:[1,26,27,40,43,50,61],reducel1:61,reducel2:61,reducelogsum:61,reducelogsumexp:61,reducemax:[13,61],reducemean:61,reducemin:61,reduceprod:61,reducesum:[21,61],reducesumsquar:[37,61],reducevar:61,reduct:61,ref:46,refer:[4,9,18,33,50,61],referenc:[59,61],refit:61,reflect:[16,61],reg1:[29,30],reg2:[29,30],reg3:[29,30],reg:43,reg_covar:61,regex:59,region:[36,54,61],regist:[13,14,21,31,33,45,47,52,53,54,60,64,65],register_oper:50,registered_model:0,registr:[13,14,48,49,53,65],regress:[2,4,9,15,18,20,29,33,36,61],regressor:[0,1,12,27,28,40,45,49,55,57,61,62],regressorchain:61,regular:[26,59,61],rel:[29,61],relat:[20,56,61],releas:[25,43,44,56,66],reli:[0,22,29],religion:26,relu:61,remain:61,remaind:[39,61],remark:26,remov:[0,4,6,7,11,17,18,25,26,33,35,36,37,38,44,59,61],remove_ident:0,remove_zero_eig:61,renam:[0,59],rename_result:41,renorm:61,rep:2,repeat:[3,11,30,35,48,59,61],repl:52,replac:[6,15,16,33,48,59,61],replace_nan:52,replaced_value_float:[17,61],replaced_value_int64:61,replacetransform:[52,61],replai:18,replic:26,repres:[60,61],represent:61,reproduc:12,request:[40,45,47,58,61],requir:[0,6,7,12,16,21,22,24,26,30,32,40,44,45,47,50,51,58,59,60,61,66],res2:[8,11,35],res3:[11,35],res4:35,res5:35,res6:35,rescal:61,reserv:[19,61],reset:61,reshap:[7,10,17,22,32,39,40,53,54,61],residu:61,residual_threshold:61,resiz:[40,61],resolut:61,resolv:67,respect:[0,61],respons:61,rest:[0,61],restor:[18,25,44,52],restrict:61,result:[11,12,13,14,16,28,31,33,35,36,38,49,50,54,55,57,59,60,61,63,65],retain:[11,35,61],retriev:[0,17,33,38,45,46,47,48,50,53,58,61],return_cov:33,return_std:33,return_train_scor:61,return_x_i:29,reus:[4,7,12,13,14,17,18,36,47,61],revers:[40,61],reversesequ:61,rewrit:[14,21],rewrite_op:36,rewritten:[0,47,58],rf:[29,30,39,61],rf_iri:[9,56],rfe:61,rfecv:61,rh:61,ri:61,ridg:61,ridgeclassifi:61,ridgeclassifiercv:61,ridgecv:61,right:[36,40,43,54,61],rightarrow:12,rmax:14,rnn:61,ro:61,robustscal:[33,61],roi:[56,61],roi_end:61,roi_start:61,roi_width:61,roi_x:61,roialign:61,root:[45,46,47,48,49,50,61],rotat:40,round:[36,40,61],round_prefer_ceil:61,round_prefer_floor:61,row:[12,21,45,47,58,60,61],row_index:21,row_indic:21,rr:61,rt:[3,4,7,8,9,
11,12,13,14,15,16,17,18,19,24,25,26,35,42,44,52,56,58,61],rule:[36,59,61],run:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,56,60,61],run_python_script_2329367799808:65,runnabl:60,running_mean:61,running_var:61,runopt:39,runtim:[1,5,16,20,27,28,32,33,34,36,40,49,55,56,57,60,61,63],runtimeerror:[5,10,13,14,15,16,31,32,34,40],rz:61,s1:61,s2:61,s:[0,2,3,4,5,6,7,8,10,12,13,14,16,17,18,19,20,21,22,23,24,25,26,30,31,32,33,34,36,39,40,42,43,44,45,46,47,48,49,52,57,58,59,60,61,66],s_:10,saga:38,sake:[49,61],salient:61,salli:61,same:[0,2,4,7,11,12,13,14,15,17,26,29,30,31,33,35,36,39,43,45,46,47,48,49,50,51,52,53,54,59,60,61,65],same_low:61,same_upp:61,samm:61,samoi:40,samoyed:40,sampl:[12,29,32,61],sample_s:61,sample_weight:[12,13,14,45,46,47,48,49,50],sampler:61,sampling_ratio:61,satur:61,save:[0,7,12,17,24,25,26,29,42,44,61],save_onnx_model:[0,17],saved_mean:61,saved_var:61,sc_data:53,scalar:61,scale1:52,scale:[4,6,9,12,17,18,33,53,61],scale_:53,scaled_cent:53,scaledtanh:61,scaler1:60,scaler2:60,scaler3:60,scaler:[6,7,10,12,17,24,25,33,36,37,42,44,53,60,61],scan:[59,61],scan_1:61,scan_i:61,scan_input:61,scan_input_ax:61,scan_input_direct:61,scan_j:61,scan_m:61,scan_out_1:61,scan_out_k:61,scan_output:61,scan_output_ax:61,scan_output_direct:61,scan_output_el:61,scanloop:61,scatter:61,scatterel:61,scatternd:61,scenario:[43,57,58,61],scientif:61,scientist:56,scikit:[0,2,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,28,30,31,32,33,35,36,37,40,42,44,45,47,52,53,55,57,58,59,60,63,64,65],scikit_learn:40,scipi:[1,7,17,27,57,61],sclice:61,scope:[12,13,14,31,43,45,46,47,48,49,50,53,59,60,61],scope_inst:0,score:[1,26,27,32,33,37,53,61],score_func:61,score_sampl:[5,33,34],score_threshold:61,scores_2d:53,scores_:53,scott:56,scratch:0,screen:40,script:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54],seamlessli:0,second:[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,58,59,61],second_dim:31,section:[0,7,12,38,65],see:[0,5,6,7,8,10,11,12,15,16,17,20,24,25,26,33,34,35,39,40,42,43,44,45,46,47,48,49,58,59,60,61],seed:[0,61],seem:[16,30,33,35,50],seen:[39,61],select:[40,49,61],select_last_index:61,select_model_inputs_output:[0,17,38],selected_indic:61,selectfdr:61,selectfpr:61,selectfrommodel:61,selectfw:61,selectkbest:61,selectpercentil:61,self:[10,12,13,14,26,39,45,46,47,48,49,50,61],selftrainingclassifi:61,selu:61,semant:61,semi_supervis:61,semit:26,sens:[33,37],sensit:61,sent:[12,40],sentanc:59,sep:[0,26,59],separ:[0,18,26,33,38,59,61],seq:61,seq_len:61,seq_length:61,sequenc:[26,35,61],sequence_len:61,sequence_length:61,sequence_typ:[17,19],sequenceat:61,sequenceconstruct:61,sequenceempti:61,sequenceeras:61,sequenceinsert:61,sequencelength:61,sequencetyp:19,sequenti:61,sequentialfeatureselector:61,seri:[40,54,61],serializetostr:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,21,22,23,24,25,26,29,30,31,33,34,35,36,37,38,39,41,42,43,44,45,46,47,48,49,52,53,54,56,58,60],servic:58,sess1:6,sess2:[5,6,8,11,34,35,36,39,48],sess3:[11,35,36],sess4:[35,36],sess5:35,sess64:16,sess64_std:16,sess6:[35,36],sess:[3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,24,25,26,29,30,31,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,52,53,54,56,58,60],sess_split:43,set1:12,set:[0,12,19,25,33,35,39,44,52,58,60,61,66],set_index
:[30,43],set_onnx_name_prefix:50,set_param:12,set_titl:[12,32,36,40],set_vis:[29,33,34,37,39,42,44,45,47,48,49,50],set_xlabel:36,set_xtick:12,set_ylabel:36,set_ytick:12,setlevel:[0,19],sever:[15,35,52,61],sex:[7,17],sexout:17,sg:61,sgdclassifi:[0,33,60,61],sgdoneclasssvm:61,sgdregressor:61,shall:61,shape2:19,shape:[0,3,4,6,7,9,10,11,12,13,14,15,16,17,18,19,21,22,23,24,25,26,30,31,32,35,36,37,39,40,41,42,43,44,45,46,47,48,49,52,53,58,61,65],shape_calc:[12,13],shape_calcul:[0,10,24,25,42,43,44,52,60],shape_fct:0,shape_object:50,shape_tensor:17,shapeobject:50,share:[13,39,61],sharedacross:61,shauheen:56,sheng:56,shift:61,shine:26,ship:7,shortcut:[39,50],shouheng:56,should:[0,6,10,29,33,36,39,44,45,46,49,53,56,59,60,61],show:[12,18,32,36,38,40,41,43,47,49,50,51,52,60,61,62,64,65],shown:[4,7,9,18,33,61],shown_imag:12,shrink:61,shrinkag:61,shrunkcovari:61,shuffl:[24,25,42,44,52,61],si_1:61,si_m:61,sibsp:[7,17],siddiqui:56,side:61,sig:[12,13,14],sigma_0:16,sigmoid:61,sign:61,signatur:[0,12,13,14,45,47],signific:[33,36,43],sim:21,similar:[30,61],similarli:61,simpl:[2,6,8,12,20,23,30,32,36,45,47,57,61],simple_onx:38,simplefilt:52,simpleimput:[7,17,60,61],simpli:[7,17,61],simplic:61,simplifi:[0,60],simplified_model:38,sin:61,sinc:61,since_vers:50,sine:61,singl:[7,15,16,17,26,29,30,43,59,61,65],sinh:61,site:[4,9,18,25,33,38,44,58],situat:[15,36,61],size32:16,size64:16,size:[0,12,30,40,61],skewedchi2sampl:61,skip:[0,8,22,61],skl2:36,skl2onnx:[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,52,53,54,56,57,58,59,60,61,65],skl2onnx_convert_lightgbm:43,skl3:36,skl4:36,skl5:36,skl6:36,skl:[4,8,18,30,31,36],skl_ord:31,skl_output:[4,18,37,60],sklapi:[6,31,36,40,52,54],sklearn:[0,2,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,44,45,46,47,48,49,50,52,53,57,58,60,62,64,65],sklearn_model:0,sklearn_op:61,sklearndecisiontreeclassifi:19,sklearndecorrelatetransform:[45,46,47,48,49],sklearnlinearclassifi:[0,41,60],sklearnlivedecorrelatetransform:50,sklearnmodelcontain:0,sklearnmodelcontainernod:0,sklearnzipmap:19,sklordinalencod:31,skorch:40,slice:61,slope:61,slower:[35,43],small:[6,7,33,36,61],smaller:[4,18,45,46,47,59,61],smallest:61,smooth_idf:61,snippet:[16,38,60,61],so:[0,4,7,13,14,17,21,24,25,33,42,44,45,47,61,65],so_1:61,so_k:61,softmax:[44,61],softmax_zero:61,softmaxcrossentropyloss:61,softplu:61,softprob:[25,44,52],softsign:61,solar:40,solut:[6,38],solv:[6,36,50,51],solver:[4,7,9,13,14,17,18,33,38,41,60,61],solver_opt:61,some:[0,5,10,13,14,16,22,34,36,37,44,52,59,60,61,62],someth:12,sometim:32,sort:[16,33,40,57,61],sourc:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,56,59,60,61],space:[12,26,61],spacetodepth:61,spars:[12,28,39,55,57,60,61,67],sparse_output:61,sparse_threshold:[52,61],sparse_valu:61,sparsecod:61,sparsepca:61,sparserandomproject:61,spatial:[3,61],spatial_dimens:61,spatial_scal:61,special:61,specif:[0,6,7,12,17,20,22,32,33,36,40,45,46,47,50,52,56,60,61,63,66],specifi:[0,12,13,14,32,33,41,50,56,59,61],spectralbiclust:61,spectralclust:61,spectralcoclust:61,speed:[33,50,61],sphinx:[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54],sphinx_runpython_extens:65,sphinxext:65,splinetransform:61,split:[11,26,33,35,59,61],splitter:61,splittosequ:61,sq_sqrt:37,s
qrd:61,sqrt:[37,61],squar:[45,46,47,48,49,61],square_sum:61,squared_error:61,squared_hing:61,squeez:[16,61],squeezenet0_concat0:40,squeezenet0_concat1:40,squeezenet0_concat2:40,squeezenet0_concat3:40,squeezenet0_concat4:40,squeezenet0_concat5:40,squeezenet0_concat6:40,squeezenet0_concat7:40,squeezenet0_conv0_fwd:40,squeezenet0_conv10_fwd:40,squeezenet0_conv11_fwd:40,squeezenet0_conv12_fwd:40,squeezenet0_conv13_fwd:40,squeezenet0_conv14_fwd:40,squeezenet0_conv15_fwd:40,squeezenet0_conv16_fwd:40,squeezenet0_conv17_fwd:40,squeezenet0_conv18_fwd:40,squeezenet0_conv19_fwd:40,squeezenet0_conv1_fwd:40,squeezenet0_conv20_fwd:40,squeezenet0_conv21_fwd:40,squeezenet0_conv22_fwd:40,squeezenet0_conv23_fwd:40,squeezenet0_conv24_fwd:40,squeezenet0_conv25_fwd:40,squeezenet0_conv2_fwd:40,squeezenet0_conv3_fwd:40,squeezenet0_conv4_fwd:40,squeezenet0_conv5_fwd:40,squeezenet0_conv6_fwd:40,squeezenet0_conv7_fwd:40,squeezenet0_conv8_fwd:40,squeezenet0_conv9_fwd:40,squeezenet0_dropout0_fwd:40,squeezenet0_flatten0_reshape0:40,squeezenet0_pool0_fwd:40,squeezenet0_pool1_fwd:40,squeezenet0_pool2_fwd:40,squeezenet0_pool3_fwd:40,squeezenet0_relu0_fwd:40,squeezenet0_relu10_fwd:40,squeezenet0_relu11_fwd:40,squeezenet0_relu12_fwd:40,squeezenet0_relu13_fwd:40,squeezenet0_relu14_fwd:40,squeezenet0_relu15_fwd:40,squeezenet0_relu16_fwd:40,squeezenet0_relu17_fwd:40,squeezenet0_relu18_fwd:40,squeezenet0_relu19_fwd:40,squeezenet0_relu1_fwd:40,squeezenet0_relu20_fwd:40,squeezenet0_relu21_fwd:40,squeezenet0_relu22_fwd:40,squeezenet0_relu23_fwd:40,squeezenet0_relu24_fwd:40,squeezenet0_relu25_fwd:40,squeezenet0_relu2_fwd:40,squeezenet0_relu3_fwd:40,squeezenet0_relu4_fwd:40,squeezenet0_relu5_fwd:40,squeezenet0_relu6_fwd:40,squeezenet0_relu7_fwd:40,squeezenet0_relu8_fwd:40,squeezenet0_relu9_fwd:40,squeezenet1:40,squeezenet:40,src:[25,44,52],st:[45,47,50],st_1:61,st_n:61,stabl:[4,9,18,33,61],stack:61,stack_method:61,stackingclassifi:[33,61],stackingregressor:61,stage:50,stai:52,standard:[12,19,32,60,61],standardscal:[1,7,12,17,24,25,27,36,37,42,44,52,60,61],start1:61,start:[4,7,12,15,17,18,25,29,31,44,52,53,56,61,62],start_i:61,start_x:61,startn:61,startswith:[12,26],stat:[26,52],state:61,state_vari:61,statist:61,statu:[4,9,18,33],std:[10,12,33,37],steal:[4,18],step:[6,7,8,9,12,17,24,29,33,36,39,40,42,52,59,60,61],stick:16,still:[7,29,33,36,61,65],stmt:[3,5,34],stochast:61,stop:[4,9,18,33,61],stop_n_inli:61,stop_prob:61,stop_scor:61,stop_word:61,stopword:61,storag:61,storage_ord:61,store:[12,16,23,28,33,36,55,57,58,60,61,63],store_covari:61,store_cv_valu:61,stori:60,str:[12,16,17,39,60],strategi:[7,17,60,61],strfeat:60,strict:[45,46,53,61],strictvers:43,stride:61,strides_spatial_shap:61,string:[0,3,4,7,11,17,18,19,22,26,32,33,35,39,50,58,59,61],string_vocabulari:61,stringnorm:61,stringtensortyp:[0,7,17,26,39,52,58,59,60],strip_acc:61,strip_newsgroup_foot:26,strip_newsgroup_quot:26,structur:[59,60],studi:12,style:[5,6,7,10,12,13,14,17,22,24,25,26,54,61],su_c0:49,su_subcst:49,sub:[26,31,46,61],sub_nam:46,subgraph:[17,61],subject:[26,52],subjectbodyextractor:26,sublinear_tf:61,submatric:61,submit:56,submodel:[12,13],subplot:[5,6,7,10,12,13,14,17,22,24,25,26,32,36,40,54],subsampl:61,subsequ:61,subset:[26,61],substitut:61,subtensor:61,subtract:61,success:[36,61],suffix:61,suggest:[0,16,61],sulphur:40,sum:[12,32,35,43,52,61],sum_:43,summar:61,summari:[56,61],summat:[43,61],sup_x:36,suppli:61,support:[0,6,7,17,26,29,32,33,36,39,40,48,50,52,56,59,60,66],supported_convert:0,suppos:61,sure:[0,45,47],surviv:[7,17],svc:[32,33,61],svd:61,svd_solve
r:61,svm:61,svmclassifi:61,svmregressor:61,svr:61,swim:40,swith:7,sy:[36,40],symbol:61,syntax:[14,59],system:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54,60,61],t1:61,t2:61,t3:61,t4:61,t5:61,t:[7,17,21,26,35,36,45,46,47,48,49,50,60,61],t_:[12,43],t_i:43,tabl:61,take:[0,15,21,26,30,31,50,61],taken:[26,61],talk:26,tan:61,tangent:61,tanh:61,target:[0,2,4,8,9,11,12,13,14,15,16,18,19,24,25,26,31,32,33,35,38,41,42,44,45,47,52,56,58,61],target_:61,target_id:32,target_nodeid:32,target_opset:[0,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,21,22,23,24,25,26,29,30,31,32,34,35,38,42,43,44,45,47,48,49,50,52,53,56,61],target_treeid:32,target_typ:61,target_weight:[32,61],task:0,tb:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54],tell:[5,13,14,16,17,19,25,26,33,34,40,44,45,47,50,65],ten:54,tensor:[0,15,40,43,61],tensor_typ:[3,17,19,21,22,23,32],tensorflow:[40,61],tensorproto:[0,13,14,22,53,61],term1:61,term2:61,term:[26,58,61],termin:61,test:[0,2,7,12,22,26,30,31,32,45,47,50,51,56,57,60,61],test_data:26,test_decorrelate_transform:[45,47],test_investig:60,test_live_decorrelate_transform:50,test_siz:[7,17,44],text:[12,26,40,52],text_onnx:17,text_vectoris:59,textcoord:40,textstat:26,textual:17,tf:[40,52,61],tf_crop_and_res:61,tf_half_pixel_for_nn:61,tfare_cast:17,tfidf:[0,26,28,55,57,59,61,67],tfidftransform:[33,52,61],tfidfvector:[0,1,27,33,52,61],th:61,th_name:13,than:[15,17,23,30,32,37,50,58,61,62],thei:[0,4,7,30,34,35,52,61],theilsenregressor:61,them:[0,5,16,34,37,38,45,54,59,60,61],then_branch:61,theoret:21,therefor:[16,36,61],thi:[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,56,58,59,60,61,65,66,67],thing:[45,47,61],think:61,third:[10,13,14,35,61],this_oper:[13,14,31,49,53],those:[38,61],though:[16,61],thought:61,three:[9,13,14,15,60,61],threshold:[13,14,36,53,61],threshold_:53,threshold_lambda:61,thresholdedrelu:61,through:[1,16,26,27,37,38,59,61],thu:61,thumbnail:12,ti:61,ticket:[7,17],tiebreak:61,tile:61,tim:3,time:[2,3,4,6,7,8,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,31,32,33,36,37,38,39,40,41,42,44,45,46,47,49,50,51,52,53,54,58,60,61],time_axi:61,time_ort:3,time_scipi:3,timeit:[3,4,5,11,34,35,43],timer:3,tind:61,titanic3:[7,17],titanic_url:[7,17],titl:[12,30,43],tmp:61,to_dot:[29,33,34,37,39,42,44,45,47,48,49,50],to_drop:[7,17],to_float:61,to_int64:61,to_onnx:[0,2,3,5,6,10,13,14,15,21,22,23,29,30,31,32,33,34,35,36,37,38,39,41,43,44,45,46,47,48,49,50,52,53,54,59,61,65],to_onnx_ext:39,to_onnx_extend:36,to_onnx_oper:[10,61],to_sav:18,to_str:61,to_valu:61,todens:33,togeth:61,token:[0,26,59,61],token_pattern:61,tokenexp:[33,59],tokenis:26,tol:[60,61],tom:56,too:[12,36,43,50,61,64],tool:[0,5,6,7,10,12,13,14,17,21,22,23,24,25,26,31,54,58,60],toolkit:56,top:[32,61],topk:61,topolog:60,torch:[56,61],total:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,61],total_pad:61,toward:61,tp:60,tpng:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54,60],tqdm:[3,30,43,52],tr:10,tr_mixin:10,tr_transposed0:22,track:20,trail:61,train:[0,1,12,13,14,15,20,21,27,28,31,36,40,45,50,55,56,57,60,61,63],train_data:[26,39],train_scor:53,train_siz:30,train_test_split:[5,6,7,8,9,11,12,13,14,16,17,19,29,30,33,34,35,36,38,41,44,56,58],train_xgb:44,trainabl:61,training_mod:61,tranform:12,transa:61,transb:61,transfer:[28,37,55,57,63],transform:[4,7,10,13,14,17,18,21,25,26,31,37,39,40,45,46,47,48,52,53,54,60,61,65],transformed_column:17,transf
ormedtargetregressor:61,transformer_:12,transformer_list:61,transformer_weight:[26,61],transformermixin:[10,12,26,45,46,47,48,49,50],transformers_:60,translat:[22,61],transpos:[22,40,45,46,48,49,50,61],treat:61,treatment:61,tree:[6,19,36,43,61],treeensembl:43,treeensembleclassifi:[19,61],treeensembleregressor:[32,36,43,61],treeensembleregressordoubl:36,tri:60,triangular:61,trick:50,tricki:37,trilinear:61,trilu:61,trip:61,trip_count:61,truetru:19,truncat:[37,61],truncatedsvd:[26,60,61],trunk:40,truth:61,trv:50,tsne:12,tsne_outputs_:12,tsne_transform:12,tsvd:60,tune:52,tupl:[0,61],tutori:[30,40,48,56,58],tvariable1:17,tvariable1b:17,tweedieregressor:61,twenty_newsgroup:26,twice:[0,16,61],two:[0,12,13,14,16,21,22,26,28,30,32,33,36,37,43,45,47,48,50,52,54,55,57,58,59,60,61,65,66],type:[0,3,4,7,10,12,13,14,15,16,17,18,19,21,22,23,25,29,31,32,35,36,39,40,41,45,46,47,48,49,50,53,58,59,60,61,65],typeerror:52,typic:[6,61],u:[40,59,61],uint16:61,uint32:61,uint64:61,uint8:61,unabl:[13,14,25,26,40,45,47,53,65],unchang:61,undefin:[58,61],under:61,understand:[12,46,61],unexpect:[12,15,31,67],unfortun:[16,17,36],unidirect:61,uniform:61,union:[26,52,61],uniqu:[0,12,32,41,45,46,47,50,53,58,60,61],unit:[60,61],unit_vari:61,univari:61,unknown:[0,22,45,46,50,58,61],unknown_valu:61,unless:[50,52,58,61],unneed:[0,38],unnorm:61,unpool:61,unscal:33,unsign:61,unsqueez:61,until:[52,59,61],unus:[0,7,12],up:[4,17,26,33,50,59,60,61],updat:[0,19,32,36,60,61],update_indic:61,update_registered_convert:[0,12,13,14,24,25,31,42,43,44,45,46,47,48,49,50,52,53,60,65],update_registered_pars:[0,60],upload:40,upper:[36,41,61],uppercas:61,upsampl:61,upto:61,url:40,url_nam:40,urllib:40,urlopen:40,us:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,43,44,45,46,48,49,50,52,53,54,55,56,57,59,60,61,63,65,66],usag:61,use_gemm:48,use_idf:61,use_label_encod:[25,44,52],usenet:26,user:[0,5,7,21,25,33,37,38,41,44,52,59,60,61],user_defined_v:61,userwarn:[25,26,32,35,44,52],usual:[0,6,8,9,11,13,14,19,22,25,32,33,35,36,37,39,45,46,47,50,52,58,59,60,61,65],utf:61,util:31,v1:50,v2:[50,56],v:[7,12,17,33,45,46,47,48,49,50,61],v_1:61,v_2:61,v_final_and_scan_output:61,v_initi:61,v_k:61,v_m:61,v_new:61,va:[40,60],val_bin:13,val_label:[13,14],val_max:13,val_op:13,val_prob:[13,14],val_val:[13,14],val_y1:[49,53],val_y2:[49,53],valid:[13,14,61],validation_fract:61,validator_classifi:[13,14],validator_classifier_convert:[13,14],validator_classifier_pars:[13,14],validator_classifier_shape_calcul:[13,14],validatorclassifi:[13,14],valu:[0,4,7,11,12,16,17,18,22,33,35,36,37,39,40,45,46,50,52,56,59,61],value_:61,value_typ:[17,19],valueerror:[12,36,53],valueinfoproto:22,values_:61,values_float:32,values_int64:61,var_0:29,var_1:29,var_2:29,var_smooth:61,vari:61,variabl:[0,17,19,26,29,30,33,36,37,45,47,60,61],variable1:17,variable1_cast:17,variable2:17,variable2_cast:17,variad:61,varianc:61,variancethreshold:61,variou:61,vb:60,vc:60,vcat2:60,vcat:60,vect:26,vector:[1,7,15,17,27,50,54,61],ver:57,verbos:[0,1,22,27,37,46,53,61],verbose_feature_names_out:61,verbose_interv:61,veri:[6,9,16,17,19,45,46,49,53],versa:61,version:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,32,33,35,43,50,56,57,59,61,66],vert:36,via:[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,61],vice:61,view:61,vision:40,visual:[33,48],visualis:33,vocabulari:[38,61],volume12:61,vote:[29,61],votingclassifi:[33,61],votingregres
sor:[29,30,61],vstack:[35,40],w0:29,w:[10,21,59,61],w_:10,w_out:61,w_scale:61,w_zero_point:61,wa:[0,2,7,12,17,20,25,32,36,37,38,43,44,50,52,56,57,58,60,61],wai:[0,1,5,7,12,16,17,21,22,23,27,28,29,33,36,37,49,52,55,56,57,58,59,60,61,65],walk:[1,27],want:[0,5,36,37,50,60,61],warm_start:61,warn:[15,25,26,32,35,38,43,44,52],wavefront:61,wb:[2,7,9,12,15,17,18,23,24,25,26,38,40,42,44,52,56,58,61],wbb:61,wbbi:61,wbc:61,wbf:61,wbh:61,wbi:61,wbo:61,wbr:61,wbz:61,wc:61,we:[0,3,4,7,12,13,14,16,17,18,21,22,23,24,25,26,29,30,31,32,33,36,37,38,39,40,43,44,45,46,47,48,50,53,58,60,61,62],wei:56,weight:[12,26,31,54,61],weight_concentration_prior:61,weight_concentration_prior_typ:61,weight_tot:61,weights_init:61,weird:6,well:[0,16,29,32,41,45,46,47,49,61],wenb:56,went:[16,29],were:[43,60,61,64],wf:61,wh:[21,61],what:[0,5,17,19,28,33,34,35,36,39,45,46,47,48,49,50,55,57,59,60,63],when:[0,1,7,11,17,19,20,25,27,28,35,37,43,44,46,52,55,56,57,60,61,62,63,65],where:[7,11,12,21,32,35,36,39,43,58,61,63],wherea:61,whether:[0,21,30,61],which:[0,4,7,8,12,13,14,15,16,17,18,19,20,21,22,23,26,31,33,36,38,45,46,47,48,49,52,56,57,58,59,60,61],white:[0,5,34,40,61],white_list:17,white_op:[0,61],whitelist:[5,34],whiten:61,who:26,whole:[0,12,24,25,33,42,44,60],whose:[0,22,61],why:[0,7,12,25,26,29,30,36,37,43,44,58,60],wi:61,wide:61,width:61,wiki:61,wikimedia:40,wikipedia:[40,61],win64_release_1:[25,44,52],window:[40,61],winter:56,wise:61,wish:61,with_cent:61,with_mean:61,with_scal:61,with_std:61,within:[19,29,37,48,61],without:[0,2,19,26,35,58,61,63],wo:61,woe1:54,woe2:54,woe:[28,31,55,57,67],woe_encoder_convert:31,woe_encoder_pars:31,woe_encoder_shape_calcul:31,woe_onx:31,woeenc_to_sklearn:31,woeencod:[28,55],woetransform:[31,54],word:[0,59,61],work:[0,7,10,12,26,29,31,33,36,40,45,46,47,49,50,59,61],workspac:[25,44,52],world:61,would:[0,12,31,33,49,50,58,59,61,64],wr:61,wrap:[10,61],wrap_as_onnx_mixin:10,wrapper:[40,59],write:[1,2,7,9,13,14,15,17,23,24,25,26,27,31,38,40,42,44,46,50,52,56,57,58,61,62,63],write_dot:[5,6,7,10,12,13,14,17,21,22,23,24,25,26,54,60],written:[50,51],wrong:[4,18,29,60],ws:31,wvar_0:29,wvar_1:29,wvar_2:29,wz:61,x002:[17,36],x00:[17,36],x00ha:17,x01:[17,36],x01b:[17,36],x01x:36,x01z:17,x02:[17,36],x02to:17,x03age:17,x03sex:17,x04:[17,36],x04axi:17,x04cast:17,x04fare:17,x05cast1:17,x05scale:[17,36],x061:[17,36],x06:[17,36],x06concat:17,x06offset:[17,36],x06pclass:17,x06scaler:[17,36],x07:17,x07ai:[17,36],x07b:17,x07imput:17,x08:[17,36],x08age_cast:17,x08embark:17,x08skl2onnx:[17,36],x08variabl:[17,36],x0b:[17,36],x0c:[17,36],x0cshape_tensorz:17,x0e:[17,36],x0emerged_column:17,x0f:36,x10:[17,36],x10pipeline_titan:17,x11:36,x12:[17,36],x13:17,x14:[17,36],x14imputed_value_float:17,x14replaced_value_float:17,x15:17,x16:17,x18:[17,36],x1:61,x1_begin:61,x1_end:61,x1a:[17,36],x1e:17,x1emlprodict_onnx:36,x1f:17,x2:[50,61],x2_begin:61,x2_end:61,x32:[11,13,14,36,43,48],x56:41,x64:36,x7f:[17,36],x80:36,x82:36,x84:36,x89:36,x8ai9:36,x8c:36,x8d:36,x90:36,x96o:36,x98:36,x9a:17,x:[0,2,3,5,6,7,8,9,10,11,12,13,14,15,16,17,19,22,23,24,25,26,29,30,31,32,33,34,35,36,37,38,40,41,42,43,44,45,46,47,48,49,50,52,53,54,56,58,61,65],x_1:61,x_2:[12,61],x_3:12,x_4:12,x_avg:61,x_center:61,x_digit:[4,18],x_final:61,x_i:61,x_max:12,x_min:12,x_new:61,x_origin:61,x_resiz:61,x_scale:61,x_t:61,x_test2:[7,17],x_test:[5,6,7,8,9,11,12,13,14,16,17,19,29,30,33,34,35,36,38,41,44,56,58],x_test_tsne2:12,x_train:[5,6,7,8,9,11,12,13,14,16,17,19,29,30,33,34,35,36,38,40,41,44,56,58],x_train_tsn:12,x_train_tsne2:12,x_zero_point:61,xa0:[17,36],xa1:17,xa6:
36,xa8:36,xac:36,xae:17,xavier:56,xb8:36,xbc:36,xbd:36,xbe:36,xbf:36,xc0:17,xc4b:36,xc8:36,xd2d:17,xd3:17,xd:12,xda5a:36,xda:36,xe0a:17,xe4:17,xe8:36,xe9:36,xeba:17,xed:36,xf6:36,xf8:36,xfa:36,xfc:36,xfd:36,xff:[17,36],xgb:44,xgbclassifi:52,xgboost:[1,27,28,52,53,55,56,57,60,62],xgboostxgbclassifi:[25,33,44,52],xgboostxgbregressor:44,xi_begin:61,xi_end:61,xi_test:[6,36],xi_train:[6,36],xor:61,xp:12,xs:61,xsf:36,xst:36,xt:[5,34,61],xuan:56,xy:40,xytext:40,y1:[49,61],y2:[49,61],y:[2,3,6,7,8,9,10,11,12,13,14,16,17,19,22,23,24,25,26,29,30,31,32,33,35,36,38,39,40,41,42,43,44,45,46,47,48,49,50,52,56,58,61],y_1:61,y_c:61,y_center:61,y_digit:[4,18],y_h:61,y_scale:61,y_test:[7,8,9,11,12,13,14,16,17,19,29,30,35,36,38,41,56,58],y_train:[6,7,8,9,11,12,13,14,16,17,19,29,30,33,35,36,38,41,44,52,56,58],y_zero_point:61,yd:12,ye:61,yellow:[5,6,7,10,12,13,14,17,22,24,25,26,40,54],yeo:61,yet:[13,14,25,32,45,47,52,60,61,65],yi:56,yi_test:36,yi_train:[6,36],yield:61,yield_op:[29,33,36,49],yields_op:[29,33,36,49],yiwen:56,you:[0,12,13,14,25,26,29,30,32,37,44,45,47,50,52,56,58,59,61,65],your:[1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54],ysf:36,yst:36,z0:59,z:[3,22,36,61],z_1:61,za:59,zahirazami:56,zeeshan:56,zero:[17,52,61],zero_point:61,zeropoint:61,zhu:56,zip:[1,7,17,22,28,40,60,61],zipmap:[0,1,7,14,17,19,24,25,27,38,39,41,42,44,52,61],zoo:[38,40],zp_1:61,zp_2:61,zp_m:61,zp_n:61,zrh:61,zs:61,zt:61},titles:["API Summary","Gallery of examples","ONNX Runtime Backend for ONNX","Compare CDist with scipy","Benchmark a pipeline","Convert a model with a reduced list of operators","Discrepencies with StandardScaler","Convert a pipeline with ColumnTransformer","Probabilities or raw scores","Train, convert and predict a model","Different ways to convert a model","Probabilities as a vector or as a ZipMap","Write your own converter for your own model","When a custom model is neither a classifier nor a regressor","When a custom model is neither a classifier nor a regressor (alternative)","Errors with onnxruntime","Discrepencies with GaussianProcessorRegressor: use of double","Walk through intermediate outputs","Investigate a pipeline","Logging, verbose","Metadata","Custom Operator for NMF Decomposition","Play with ONNX operators","Draw a pipeline","Convert a pipeline with a LightGbm model","Convert a pipeline with a XGBoost model","TfIdfVectorizer with ONNX","Computation times","Examples","Train and deploy a scikit-learn pipeline","Benchmark ONNX conversion","Converter for WOEEncoder from categorical_encoder","What is the opset number?","One model, many possible conversions with options","Black list operators when converting","Choose appropriate output of a classifier","Issues when switching to float","Intermediate results and investigation","Store arrays in one onnx graph","Dataframe as an input","Transfer Learning with ONNX","Modify the ONNX graph","Convert a pipeline with a LightGBM classifier","Convert a pipeline with a LightGBM regressor","Convert a pipeline with a XGBoost model","Implement a new converter","Two ways to implement a converter","Implement a new converter using other converters","A new converter with options","Change the number of outputs by adding a parser","Fast design with a python runtime","Fast runtime with onnxruntime","TfIdf and sparse matrices","Converter for pyod.models.iforest.IForest","Converter for WOE","Computation times","sklearn-onnx: Convert your scikit-learn model into 
ONNX","Tutorial","Introduction","Converters with options","Convert a pipeline","Supported scikit-learn Models","Using converters from other libraries","The easy case","Write converters for other libraries","A custom converter for a custom model","Extend ONNX, extend runtime","Advanced scenarios"],titleterms:{"0":52,"1":58,"2":58,"3":58,"case":63,"class":[11,59],"default":[5,34,35],"export":58,"final":[13,14,37,42,44,45,47,49,50],"float":36,"import":52,"new":[0,6,12,45,47,48,49,50,60],"switch":36,"true":[16,35],A:[31,39,48,49,50,54,65],If:[5,34],No:36,One:[11,33,54],The:[36,63],ad:49,add:38,advanc:67,after:52,again:[24,25,42,44],all:36,altern:14,an:[6,39,40],api:[0,22],appropri:35,arrai:38,artifici:52,attempt:16,avail:[33,61],backend:2,basic:41,behaviour:35,benchmark:[3,4,30],black:34,booster:44,build:21,calcul:[50,60],cannot:[5,34],castregressor:36,casttransform:36,categori:39,categorical_encod:31,cdist:3,chang:[41,49],check:53,choos:35,classifi:[13,14,24,25,35,40,42,44,59],column:35,columntransform:7,compar:[3,4,7,11,17,24,25,35,42,44],comparison:[29,48],complex:[7,17,60],comput:[9,17,27,55],concept:0,conclus:52,contain:0,contributor:0,convers:[4,5,12,13,14,18,19,21,26,30,33,34,39,45,46,47,48,49,54,60],convert:[0,5,7,8,9,10,11,12,13,14,16,17,19,24,25,29,31,34,35,38,42,43,44,45,46,47,48,50,53,54,56,58,59,60,61,62,64,65],convert_sklearn:58,countvector:59,cover:61,creat:[4,7,17,18],custom:[10,13,14,21,31,45,46,47,48,53,65],data:[32,36,52],datafram:39,dataset:[39,52],decision_funct:8,decision_path:33,decomposit:21,decorrel:50,defin:[7,17],dens:52,deploi:29,design:50,differ:10,dimens:16,discrep:[6,16,36,43,53,60],displai:[7,10,12,13,14,17,22,24,25,26],document:61,doubl:16,draw:23,easi:63,eig:50,end:40,ensembl:52,error:15,everi:37,exampl:[1,6,22,28,31,41,44,50,54,60],experiment:12,extend:[50,66],fail:6,fals:35,fast:[50,51],favorit:58,first:16,format:[23,58],framework:58,from:[31,62],galleri:1,gaussianmixtur:[5,34],gaussianprocessorregressor:16,gaussianprocessregressor:59,graph:[0,3,7,10,12,13,14,17,22,24,25,26,37,38,39,40,41,42,44,45,47,49,50,54],half:54,hot:54,iforest:53,imag:40,implement:[12,45,46,47],increas:16,infer:38,influenc:43,inform:59,initial_typ:58,input:[7,17,39,41],intermedi:[4,17,18,37,41],introduct:58,investig:[18,37,60],iri:[13,14],issu:36,json:23,kmean:10,layer:40,learn:[29,40,56,61],let:[11,35],lgbmclassifi:[24,42,43],lgbmregressor:43,librari:[62,64],lightgbm:[24,42,43],line:54,list:[5,33,34],load:[38,40,58],log:[0,19],logic:39,look:37,mani:33,manipul:0,matric:52,measur:30,metadata:20,miss:60,mnist:12,model:[5,8,9,10,11,12,13,14,16,19,21,23,24,25,29,33,35,38,40,44,45,46,47,48,53,56,58,61,65],modifi:41,more:[36,40],multioutputclassifi:35,multipl:22,name:41,nan:52,nearestneighbor:59,neither:[13,14],nmf:21,node:[0,37],nor:[13,14],number:[32,49],object:10,one:[38,40],onnx:[0,2,3,4,7,9,10,12,13,14,16,17,18,19,21,22,23,24,25,26,29,30,32,38,39,40,41,45,46,47,48,49,50,54,56,58,61,66],onnxab:61,onnxabs_13:61,onnxabs_1:61,onnxabs_6:61,onnxaco:61,onnxacos_7:61,onnxacosh:61,onnxacosh_9:61,onnxadagrad:61,onnxadagrad_1:61,onnxadam:61,onnxadam_1:61,onnxadd:61,onnxadd_13:61,onnxadd_14:61,onnxadd_1:61,onnxadd_6:61,onnxadd_7:61,onnxand:61,onnxand_1:61,onnxand_7:61,onnxargmax:61,onnxargmax_11:61,onnxargmax_12:61,onnxargmax_13:61,onnxargmax_1:61,onnxargmin:61,onnxargmin_11:61,onnxargmin_12:61,onnxargmin_13:61,onnxargmin_1:61,onnxarrayfeatureextractor:61,onnxarrayfeatureextractor_1:61,onnxasin:61,onnxasin_7:61,onnxasinh:61,onnxasinh_9:61,onnxatan:61,onnxatan_7:61,onnxatanh:61,onnxatanh_9:61,onnxave
ragepool:61,onnxaveragepool_10:61,onnxaveragepool_11:61,onnxaveragepool_1:61,onnxaveragepool_7:61,onnxbatchnorm:61,onnxbatchnormalization_14:61,onnxbatchnormalization_15:61,onnxbatchnormalization_1:61,onnxbatchnormalization_6:61,onnxbatchnormalization_7:61,onnxbatchnormalization_9:61,onnxbernoulli:61,onnxbernoulli_15:61,onnxbinar:61,onnxbinarizer_1:61,onnxbitshift:61,onnxbitshift_11:61,onnxboost:61,onnxcast:61,onnxcast_13:61,onnxcast_1:61,onnxcast_6:61,onnxcast_9:61,onnxcastlik:61,onnxcastlike_15:61,onnxcastmap:61,onnxcastmap_1:61,onnxcastregressor:61,onnxcasttransform:61,onnxcategorymapp:61,onnxcategorymapper_1:61,onnxceil:61,onnxceil_13:61,onnxceil_1:61,onnxceil_6:61,onnxcelu:61,onnxcelu_12:61,onnxclip:61,onnxclip_11:61,onnxclip_12:61,onnxclip_13:61,onnxclip_1:61,onnxclip_6:61,onnxco:61,onnxcompress:61,onnxcompress_11:61,onnxcompress_9:61,onnxconcat:61,onnxconcat_11:61,onnxconcat_13:61,onnxconcat_1:61,onnxconcat_4:61,onnxconcatfromsequ:61,onnxconcatfromsequence_11:61,onnxconst:61,onnxconstant_11:61,onnxconstant_12:61,onnxconstant_13:61,onnxconstant_1:61,onnxconstant_9:61,onnxconstantofshap:61,onnxconstantofshape_9:61,onnxconv:61,onnxconv_11:61,onnxconv_1:61,onnxconvinteg:61,onnxconvinteger_10:61,onnxconvtranspos:61,onnxconvtranspose_11:61,onnxconvtranspose_1:61,onnxcos_7:61,onnxcosh:61,onnxcosh_9:61,onnxcumsum:61,onnxcumsum_11:61,onnxcumsum_14:61,onnxcustomscorertransform:61,onnxdecorrelatetransform:61,onnxdepthtospac:61,onnxdepthtospace_11:61,onnxdepthtospace_13:61,onnxdepthtospace_1:61,onnxdequantizelinear:61,onnxdequantizelinear_10:61,onnxdequantizelinear_13:61,onnxdet:61,onnxdet_11:61,onnxdictvector:61,onnxdictvectorizer_1:61,onnxdiv:61,onnxdiv_13:61,onnxdiv_14:61,onnxdiv_1:61,onnxdiv_6:61,onnxdiv_7:61,onnxdropout:61,onnxdropout_10:61,onnxdropout_12:61,onnxdropout_13:61,onnxdropout_1:61,onnxdropout_6:61,onnxdropout_7:61,onnxdynamicquantizelinear:61,onnxdynamicquantizelinear_11:61,onnxeinsum:61,onnxeinsum_12:61,onnxelu:61,onnxelu_1:61,onnxelu_6:61,onnxequ:61,onnxequal_11:61,onnxequal_13:61,onnxequal_1:61,onnxequal_7:61,onnxerf:61,onnxerf_13:61,onnxerf_9:61,onnxexp:61,onnxexp_13:61,onnxexp_1:61,onnxexp_6:61,onnxexpand:61,onnxexpand_13:61,onnxexpand_8:61,onnxeyelik:61,onnxeyelike_9:61,onnxfeaturevector:61,onnxfeaturevectorizer_1:61,onnxflatten:61,onnxflatten_11:61,onnxflatten_13:61,onnxflatten_1:61,onnxflatten_9:61,onnxfloor:61,onnxfloor_13:61,onnxfloor_1:61,onnxfloor_6:61,onnxgath:61,onnxgather_11:61,onnxgather_13:61,onnxgather_1:61,onnxgatherel:61,onnxgatherelements_11:61,onnxgatherelements_13:61,onnxgathernd:61,onnxgathernd_11:61,onnxgathernd_12:61,onnxgathernd_13:61,onnxgemm:61,onnxgemm_11:61,onnxgemm_13:61,onnxgemm_1:61,onnxgemm_6:61,onnxgemm_7:61,onnxgemm_9:61,onnxglobalaveragepool:61,onnxglobalaveragepool_1:61,onnxgloballppool:61,onnxgloballppool_1:61,onnxgloballppool_2:61,onnxglobalmaxpool:61,onnxglobalmaxpool_1:61,onnxgradi:61,onnxgradient_1:61,onnxgreat:61,onnxgreater_13:61,onnxgreater_1:61,onnxgreater_7:61,onnxgreater_9:61,onnxgreaterorequ:61,onnxgreaterorequal_12:61,onnxgreaterorequal_16:61,onnxgridsampl:61,onnxgridsample_16:61,onnxgru:61,onnxgru_14:61,onnxgru_1:61,onnxgru_3:61,onnxgru_7:61,onnxhardmax:61,onnxhardmax_11:61,onnxhardmax_13:61,onnxhardmax_1:61,onnxhardsigmoid:61,onnxhardsigmoid_1:61,onnxhardsigmoid_6:61,onnxhardswish:61,onnxhardswish_14:61,onnxident:61,onnxidentity_13:61,onnxidentity_14:61,onnxidentity_16:61,onnxidentity_1:61,onnxif:61,onnxif_11:61,onnxif_13:61,onnxif_16:61,onnxif_1:61,onnximput:61,onnximputer_1:61,onnxinstancenorm:61,onnxinstancenormalization_1
:61,onnxinstancenormalization_6:61,onnxisinf:61,onnxisinf_10:61,onnxisnan:61,onnxisnan_13:61,onnxisnan_9:61,onnxlabelencod:61,onnxlabelencoder_1:61,onnxlabelencoder_2:61,onnxleakyrelu:61,onnxleakyrelu_16:61,onnxleakyrelu_1:61,onnxleakyrelu_6:61,onnxless:61,onnxless_13:61,onnxless_1:61,onnxless_7:61,onnxless_9:61,onnxlessorequ:61,onnxlessorequal_12:61,onnxlessorequal_16:61,onnxlinearclassifi:61,onnxlinearclassifier_1:61,onnxlinearregressor:61,onnxlinearregressor_1:61,onnxlivedecorrelatetransform:61,onnxlog:61,onnxlog_13:61,onnxlog_1:61,onnxlog_6:61,onnxlogsoftmax:61,onnxlogsoftmax_11:61,onnxlogsoftmax_13:61,onnxlogsoftmax_1:61,onnxloop:61,onnxloop_11:61,onnxloop_13:61,onnxloop_16:61,onnxloop_1:61,onnxlpnorm:61,onnxlpnormalization_1:61,onnxlppool:61,onnxlppool_11:61,onnxlppool_1:61,onnxlppool_2:61,onnxlrn:61,onnxlrn_13:61,onnxlrn_1:61,onnxlstm:61,onnxlstm_14:61,onnxlstm_1:61,onnxlstm_7:61,onnxmatmul:61,onnxmatmul_13:61,onnxmatmul_1:61,onnxmatmul_9:61,onnxmatmulinteg:61,onnxmatmulinteger_10:61,onnxmax:61,onnxmax_12:61,onnxmax_13:61,onnxmax_1:61,onnxmax_6:61,onnxmax_8:61,onnxmaxpool:61,onnxmaxpool_10:61,onnxmaxpool_11:61,onnxmaxpool_12:61,onnxmaxpool_1:61,onnxmaxpool_8:61,onnxmaxroipool:61,onnxmaxroipool_1:61,onnxmaxunpool:61,onnxmaxunpool_11:61,onnxmaxunpool_9:61,onnxmean:61,onnxmean_13:61,onnxmean_1:61,onnxmean_6:61,onnxmean_8:61,onnxmeanvariancenorm:61,onnxmeanvariancenormalization_13:61,onnxmeanvariancenormalization_9:61,onnxmin:61,onnxmin_12:61,onnxmin_13:61,onnxmin_1:61,onnxmin_6:61,onnxmin_8:61,onnxmockwrappedlightgbmboosterclassifi:61,onnxmod:61,onnxmod_10:61,onnxmod_13:61,onnxmomentum:61,onnxmomentum_1:61,onnxmul:61,onnxmul_13:61,onnxmul_14:61,onnxmul_1:61,onnxmul_6:61,onnxmul_7:61,onnxmultinomi:61,onnxmultinomial_7:61,onnxneg:61,onnxneg_13:61,onnxneg_1:61,onnxneg_6:61,onnxnegativeloglikelihoodloss:61,onnxnegativeloglikelihoodloss_12:61,onnxnegativeloglikelihoodloss_13:61,onnxnonmaxsuppress:61,onnxnonmaxsuppression_10:61,onnxnonmaxsuppression_11:61,onnxnonzero:61,onnxnonzero_13:61,onnxnonzero_9:61,onnxnorm:61,onnxnormalizer_1:61,onnxnot:61,onnxnot_1:61,onnxonehot:61,onnxonehot_11:61,onnxonehot_9:61,onnxonehotencod:61,onnxonehotencoder_1:61,onnxopt:61,onnxoptional_15:61,onnxoptionalgetel:61,onnxoptionalgetelement_15:61,onnxoptionalhasel:61,onnxoptionalhaselement_15:61,onnxor:61,onnxor_1:61,onnxor_7:61,onnxordinalencod:61,onnxpad:61,onnxpad_11:61,onnxpad_13:61,onnxpad_1:61,onnxpad_2:61,onnxpow:61,onnxpow_12:61,onnxpow_13:61,onnxpow_15:61,onnxpow_1:61,onnxpow_7:61,onnxpredictabletsn:61,onnxprelu:61,onnxprelu_16:61,onnxprelu_1:61,onnxprelu_6:61,onnxprelu_7:61,onnxprelu_9:61,onnxqlinearconv:61,onnxqlinearconv_10:61,onnxqlinearmatmul:61,onnxqlinearmatmul_10:61,onnxquantizelinear:61,onnxquantizelinear_10:61,onnxquantizelinear_13:61,onnxrandomnorm:61,onnxrandomnormal_1:61,onnxrandomnormallik:61,onnxrandomnormallike_1:61,onnxrandomuniform:61,onnxrandomuniform_1:61,onnxrandomuniformlik:61,onnxrandomuniformlike_1:61,onnxrang:61,onnxrange_11:61,onnxreciproc:61,onnxreciprocal_13:61,onnxreciprocal_1:61,onnxreciprocal_6:61,onnxreducel1:61,onnxreducel1_11:61,onnxreducel1_13:61,onnxreducel1_1:61,onnxreducel2:61,onnxreducel2_11:61,onnxreducel2_13:61,onnxreducel2_1:61,onnxreducelogsum:61,onnxreducelogsum_11:61,onnxreducelogsum_13:61,onnxreducelogsum_1:61,onnxreducelogsumexp:61,onnxreducelogsumexp_11:61,onnxreducelogsumexp_13:61,onnxreducelogsumexp_1:61,onnxreducemax:61,onnxreducemax_11:61,onnxreducemax_12:61,onnxreducemax_13:61,onnxreducemax_1:61,onnxreducemean:61,onnxreducemean_11:61,onnxreducemean_13:6
1,onnxreducemean_1:61,onnxreducemin:61,onnxreducemin_11:61,onnxreducemin_12:61,onnxreducemin_13:61,onnxreducemin_1:61,onnxreduceprod:61,onnxreduceprod_11:61,onnxreduceprod_13:61,onnxreduceprod_1:61,onnxreducesum:61,onnxreducesum_11:61,onnxreducesum_13:61,onnxreducesum_1:61,onnxreducesumsquar:61,onnxreducesumsquare_11:61,onnxreducesumsquare_13:61,onnxreducesumsquare_1:61,onnxrelu:61,onnxrelu_13:61,onnxrelu_14:61,onnxrelu_1:61,onnxrelu_6:61,onnxreplacetransform:61,onnxres:61,onnxreshap:61,onnxreshape_13:61,onnxreshape_14:61,onnxreshape_1:61,onnxreshape_5:61,onnxresize_10:61,onnxresize_11:61,onnxresize_13:61,onnxreversesequ:61,onnxreversesequence_10:61,onnxrnn:61,onnxrnn_14:61,onnxrnn_1:61,onnxrnn_7:61,onnxroialign:61,onnxroialign_10:61,onnxroialign_16:61,onnxround:61,onnxround_11:61,onnxruntim:[3,10,15,51],onnxscal:61,onnxscaler_1:61,onnxscan:61,onnxscan_11:61,onnxscan_16:61,onnxscan_8:61,onnxscan_9:61,onnxscatt:61,onnxscatter_11:61,onnxscatter_9:61,onnxscatterel:61,onnxscatterelements_11:61,onnxscatterelements_13:61,onnxscatterelements_16:61,onnxscatternd:61,onnxscatternd_11:61,onnxscatternd_13:61,onnxscatternd_16:61,onnxselu:61,onnxselu_1:61,onnxselu_6:61,onnxsequenceat:61,onnxsequenceat_11:61,onnxsequenceconstruct:61,onnxsequenceconstruct_11:61,onnxsequenceempti:61,onnxsequenceempty_11:61,onnxsequenceeras:61,onnxsequenceerase_11:61,onnxsequenceinsert:61,onnxsequenceinsert_11:61,onnxsequencelength:61,onnxsequencelength_11:61,onnxshap:61,onnxshape_13:61,onnxshape_15:61,onnxshape_1:61,onnxshrink:61,onnxshrink_9:61,onnxsigmoid:61,onnxsigmoid_13:61,onnxsigmoid_1:61,onnxsigmoid_6:61,onnxsign:61,onnxsign_13:61,onnxsign_9:61,onnxsin:61,onnxsin_7:61,onnxsinh:61,onnxsinh_9:61,onnxsiz:61,onnxsize_13:61,onnxsize_1:61,onnxsklearnadaboostclassifi:61,onnxsklearnadaboostregressor:61,onnxsklearnardregress:61,onnxsklearnbaggingclassifi:61,onnxsklearnbaggingregressor:61,onnxsklearnbayesiangaussianmixtur:61,onnxsklearnbayesianridg:61,onnxsklearnbernoullinb:61,onnxsklearnbinar:61,onnxsklearncalibratedclassifiercv:61,onnxsklearncategoricalnb:61,onnxsklearncolumntransform:61,onnxsklearncomplementnb:61,onnxsklearncountvector:61,onnxsklearndecisiontreeclassifi:61,onnxsklearndecisiontreeregressor:61,onnxsklearndictvector:61,onnxsklearnelasticnet:61,onnxsklearnelasticnetcv:61,onnxsklearnextratreeclassifi:61,onnxsklearnextratreeregressor:61,onnxsklearnextratreesclassifi:61,onnxsklearnextratreesregressor:61,onnxsklearnfeatureunion:61,onnxsklearnfunctiontransform:61,onnxsklearngaussianmixtur:61,onnxsklearngaussiannb:61,onnxsklearngaussianprocessclassifi:61,onnxsklearngaussianprocessregressor:61,onnxsklearngaussianrandomproject:61,onnxsklearngenericunivariateselect:61,onnxsklearngradientboostingclassifi:61,onnxsklearngradientboostingregressor:61,onnxsklearngridsearchcv:61,onnxsklearnhistgradientboostingclassifi:61,onnxsklearnhistgradientboostingregressor:61,onnxsklearnhuberregressor:61,onnxsklearnincrementalpca:61,onnxsklearnisolationforest:61,onnxsklearnkbinsdiscret:61,onnxsklearnkernelcenter:61,onnxsklearnkernelpca:61,onnxsklearnkmean:61,onnxsklearnkneighborsclassifi:61,onnxsklearnkneighborsregressor:61,onnxsklearnkneighborstransform:61,onnxsklearnknnimput:61,onnxsklearnlabelbinar:61,onnxsklearnlabelencod:61,onnxsklearnlar:61,onnxsklearnlarscv:61,onnxsklearnlasso:61,onnxsklearnlassocv:61,onnxsklearnlassolar:61,onnxsklearnlassolars:61,onnxsklearnlassolarscv:61,onnxsklearnlgbmclassifi:61,onnxsklearnlgbmregressor:61,onnxsklearnlineardiscriminantanalysi:61,onnxsklearnlinearregress:61,onnxsklearnlinearsvc:61,onnxsklearnlin
earsvr:61,onnxsklearnlocaloutlierfactor:61,onnxsklearnlogisticregress:61,onnxsklearnlogisticregressioncv:61,onnxsklearnmaxabsscal:61,onnxsklearnminibatchkmean:61,onnxsklearnminmaxscal:61,onnxsklearnmlpclassifi:61,onnxsklearnmlpregressor:61,onnxsklearnmultinomialnb:61,onnxsklearnmultioutputclassifi:61,onnxsklearnmultioutputregressor:61,onnxsklearnmultitaskelasticnet:61,onnxsklearnmultitaskelasticnetcv:61,onnxsklearnmultitasklasso:61,onnxsklearnmultitasklassocv:61,onnxsklearnnearestneighbor:61,onnxsklearnneighborhoodcomponentsanalysi:61,onnxsklearnnorm:61,onnxsklearnnusvc:61,onnxsklearnnusvr:61,onnxsklearnoneclasssvm:61,onnxsklearnonehotencod:61,onnxsklearnonevsrestclassifi:61,onnxsklearnordinalencod:61,onnxsklearnorthogonalmatchingpursuit:61,onnxsklearnorthogonalmatchingpursuitcv:61,onnxsklearnpassiveaggressiveclassifi:61,onnxsklearnpassiveaggressiveregressor:61,onnxsklearnpca:61,onnxsklearnperceptron:61,onnxsklearnpipelin:61,onnxsklearnplsregress:61,onnxsklearnpoissonregressor:61,onnxsklearnpolynomialfeatur:61,onnxsklearnpowertransform:61,onnxsklearnquantileregressor:61,onnxsklearnradiusneighborsclassifi:61,onnxsklearnradiusneighborsregressor:61,onnxsklearnrandomforestclassifi:61,onnxsklearnrandomforestregressor:61,onnxsklearnrandomtreesembed:61,onnxsklearnransacregressor:61,onnxsklearnrf:61,onnxsklearnrfecv:61,onnxsklearnridg:61,onnxsklearnridgeclassifi:61,onnxsklearnridgeclassifiercv:61,onnxsklearnridgecv:61,onnxsklearnrobustscal:61,onnxsklearnselectfdr:61,onnxsklearnselectfpr:61,onnxsklearnselectfrommodel:61,onnxsklearnselectfw:61,onnxsklearnselectkbest:61,onnxsklearnselectpercentil:61,onnxsklearnsgdclassifi:61,onnxsklearnsgdregressor:61,onnxsklearnsimpleimput:61,onnxsklearnstackingclassifi:61,onnxsklearnstackingregressor:61,onnxsklearnstandardscal:61,onnxsklearnsvc:61,onnxsklearnsvr:61,onnxsklearntfidftransform:61,onnxsklearntfidfvector:61,onnxsklearntheilsenregressor:61,onnxsklearntruncatedsvd:61,onnxsklearntweedieregressor:61,onnxsklearnvariancethreshold:61,onnxsklearnvotingclassifi:61,onnxsklearnvotingregressor:61,onnxsklearnxgbclassifi:61,onnxsklearnxgbregressor:61,onnxslic:61,onnxslice_10:61,onnxslice_11:61,onnxslice_13:61,onnxslice_1:61,onnxsoftmax:61,onnxsoftmax_11:61,onnxsoftmax_13:61,onnxsoftmax_1:61,onnxsoftmaxcrossentropyloss:61,onnxsoftmaxcrossentropyloss_12:61,onnxsoftmaxcrossentropyloss_13:61,onnxsoftplu:61,onnxsoftplus_1:61,onnxsoftsign:61,onnxsoftsign_1:61,onnxspacetodepth:61,onnxspacetodepth_13:61,onnxspacetodepth_1:61,onnxsplit:61,onnxsplit_11:61,onnxsplit_13:61,onnxsplit_1:61,onnxsplit_2:61,onnxsplittosequ:61,onnxsplittosequence_11:61,onnxsqrt:61,onnxsqrt_13:61,onnxsqrt_1:61,onnxsqrt_6:61,onnxsqueez:61,onnxsqueeze_11:61,onnxsqueeze_13:61,onnxsqueeze_1:61,onnxstringnorm:61,onnxstringnormalizer_10:61,onnxsub:61,onnxsub_13:61,onnxsub_14:61,onnxsub_1:61,onnxsub_6:61,onnxsub_7:61,onnxsum:61,onnxsum_13:61,onnxsum_1:61,onnxsum_6:61,onnxsum_8:61,onnxsvmclassifi:61,onnxsvmclassifier_1:61,onnxsvmregressor:61,onnxsvmregressor_1:61,onnxtan:61,onnxtan_7:61,onnxtanh:61,onnxtanh_13:61,onnxtanh_1:61,onnxtanh_6:61,onnxtfidfvector:61,onnxtfidfvectorizer_9:61,onnxthresholdedrelu:61,onnxthresholdedrelu_10:61,onnxtil:61,onnxtile_13:61,onnxtile_1:61,onnxtile_6:61,onnxtopk:61,onnxtopk_10:61,onnxtopk_11:61,onnxtopk_1:61,onnxtransfertransform:61,onnxtranspos:61,onnxtranspose_13:61,onnxtranspose_1:61,onnxtreeensembleclassifi:61,onnxtreeensembleclassifier_1:61,onnxtreeensembleclassifier_3:61,onnxtreeensembleregressor:61,onnxtreeensembleregressor_1:61,onnxtreeensembleregressor_3:61,onnxtrilu:61,
onnxtrilu_14:61,onnxuniqu:61,onnxunique_11:61,onnxunsqueez:61,onnxunsqueeze_11:61,onnxunsqueeze_13:61,onnxunsqueeze_1:61,onnxupsampl:61,onnxupsample_10:61,onnxupsample_7:61,onnxupsample_9:61,onnxvalidatorclassifi:61,onnxwher:61,onnxwhere_16:61,onnxwhere_9:61,onnxwoeencod:61,onnxwoetransform:61,onnxwrappedlightgbmboost:61,onnxwrappedlightgbmboosterclassifi:61,onnxxor:61,onnxxor_1:61,onnxxor_7:61,onnxzipmap:61,onnxzipmap_1:61,oper:[5,21,22,34,50,61],opset:32,option:[33,35,48,59],ordinalencod:31,other:[32,47,62,64],output:[4,8,11,17,35,38,41,49],output_class_label:35,own:12,paramet:[19,38],parameter:60,parser:[0,13,14,49,60],pca:40,per:11,pickabl:59,pickl:18,pipelin:[4,6,7,10,17,18,23,24,25,26,29,30,33,36,37,40,42,43,44,59,60,61],plai:22,possibl:33,predict:[7,9,10,11,17,24,25,29,35,39,42,44],probabl:[8,11],process:[5,30,34,43],pyod:53,python:[22,29,37,50],quick:58,raw:[8,59],raw_scor:33,reduc:5,reducelogsumexp:[5,34],regist:[0,24,25,42,43,44],registr:50,regressor:[13,14,43],remov:40,renam:41,repeat:12,replac:52,result:[37,41],retriev:[23,40],return_std:16,run:58,runtim:[2,9,29,30,37,50,51,58,66],s:[11,35],same:[22,44],save:38,scenario:67,scikit:[29,56,61],scipi:3,scope:0,score:[8,13,14,59],second:16,select:38,setup:52,shape:[50,60],shape_calcul:12,simpl:[10,21,31,54],size:16,sklearn:[22,56],sledgehamm:36,sne:12,spars:52,split:43,standardscal:6,start:58,step:[4,18,37,58],store:38,sub:17,summari:0,support:61,t:12,test:[13,14],tfidf:52,tfidfvector:[26,59],third:16,through:17,time:[5,11,27,30,34,35,43,48,55],titan:60,to_onnx:58,topolog:0,train:[7,8,9,11,16,17,19,24,25,26,29,30,35,38,42,43,44,52,53,58],transfer:40,transform:[12,49,50],tutori:57,two:[46,49],type:[8,11],unhid:39,us:[16,47,58,62],util:0,variabl:[16,50],vector:11,verbos:19,version:0,wai:[10,46],walk:17,what:32,when:[13,14,34,36],which:50,without:[5,11,34],woe:54,woeencod:31,write:[12,64],xgbclassifi:[25,44],xgboost:[25,44],xgbregressor:44,your:[12,56,58],zimpap:35,zipmap:[11,33,35,59]}}) \ No newline at end of file +Search.setIndex({"docnames": ["api_summary", "auto_examples/index", "auto_examples/plot_backend", "auto_examples/plot_benchmark_cdist", "auto_examples/plot_benchmark_pipeline", "auto_examples/plot_black_op", "auto_examples/plot_cast_transformer", "auto_examples/plot_complex_pipeline", "auto_examples/plot_convert_decision_function", "auto_examples/plot_convert_model", "auto_examples/plot_convert_syntax", "auto_examples/plot_convert_zipmap", "auto_examples/plot_custom_model", "auto_examples/plot_custom_parser", "auto_examples/plot_custom_parser_alternative", "auto_examples/plot_errors_onnxruntime", "auto_examples/plot_gpr", "auto_examples/plot_intermediate_outputs", "auto_examples/plot_investigate_pipeline", "auto_examples/plot_logging", "auto_examples/plot_metadata", "auto_examples/plot_nmf", "auto_examples/plot_onnx_operators", "auto_examples/plot_pipeline", "auto_examples/plot_pipeline_lightgbm", "auto_examples/plot_pipeline_xgboost", "auto_examples/plot_tfidfvectorizer", "auto_examples/sg_execution_times", "auto_tutorial/index", "auto_tutorial/plot_abegin_convert_pipeline", "auto_tutorial/plot_bbegin_measure_time", "auto_tutorial/plot_catwoe_transformer", "auto_tutorial/plot_cbegin_opset", "auto_tutorial/plot_dbegin_options", "auto_tutorial/plot_dbegin_options_list", "auto_tutorial/plot_dbegin_options_zipmap", "auto_tutorial/plot_ebegin_float_double", "auto_tutorial/plot_fbegin_investigate", "auto_tutorial/plot_gbegin_cst", "auto_tutorial/plot_gbegin_dataframe", "auto_tutorial/plot_gbegin_transfer_learning", 
"auto_tutorial/plot_gconverting", "auto_tutorial/plot_gexternal_catboost", "auto_tutorial/plot_gexternal_lightgbm", "auto_tutorial/plot_gexternal_lightgbm_reg", "auto_tutorial/plot_gexternal_xgboost", "auto_tutorial/plot_icustom_converter", "auto_tutorial/plot_jcustom_syntax", "auto_tutorial/plot_kcustom_converter_wrapper", "auto_tutorial/plot_lcustom_options", "auto_tutorial/plot_mcustom_parser", "auto_tutorial/plot_ngrams", "auto_tutorial/plot_pextend_python_runtime", "auto_tutorial/plot_qextend_onnxruntime", "auto_tutorial/plot_transformer_discrepancy", "auto_tutorial/plot_usparse_xgboost", "auto_tutorial/plot_wext_pyod_forest", "auto_tutorial/plot_woe_transformer", "auto_tutorial/sg_execution_times", "index", "index_tutorial", "introduction", "parameterized", "pipeline", "supported", "tutorial_1-5_external", "tutorial_1_simple", "tutorial_2-5_extlib", "tutorial_2_new_converter", "tutorial_3_new_operator", "tutorial_4_advanced"], "filenames": ["api_summary.rst", "auto_examples/index.rst", "auto_examples/plot_backend.rst", "auto_examples/plot_benchmark_cdist.rst", "auto_examples/plot_benchmark_pipeline.rst", "auto_examples/plot_black_op.rst", "auto_examples/plot_cast_transformer.rst", "auto_examples/plot_complex_pipeline.rst", "auto_examples/plot_convert_decision_function.rst", "auto_examples/plot_convert_model.rst", "auto_examples/plot_convert_syntax.rst", "auto_examples/plot_convert_zipmap.rst", "auto_examples/plot_custom_model.rst", "auto_examples/plot_custom_parser.rst", "auto_examples/plot_custom_parser_alternative.rst", "auto_examples/plot_errors_onnxruntime.rst", "auto_examples/plot_gpr.rst", "auto_examples/plot_intermediate_outputs.rst", "auto_examples/plot_investigate_pipeline.rst", "auto_examples/plot_logging.rst", "auto_examples/plot_metadata.rst", "auto_examples/plot_nmf.rst", "auto_examples/plot_onnx_operators.rst", "auto_examples/plot_pipeline.rst", "auto_examples/plot_pipeline_lightgbm.rst", "auto_examples/plot_pipeline_xgboost.rst", "auto_examples/plot_tfidfvectorizer.rst", "auto_examples/sg_execution_times.rst", "auto_tutorial/index.rst", "auto_tutorial/plot_abegin_convert_pipeline.rst", "auto_tutorial/plot_bbegin_measure_time.rst", "auto_tutorial/plot_catwoe_transformer.rst", "auto_tutorial/plot_cbegin_opset.rst", "auto_tutorial/plot_dbegin_options.rst", "auto_tutorial/plot_dbegin_options_list.rst", "auto_tutorial/plot_dbegin_options_zipmap.rst", "auto_tutorial/plot_ebegin_float_double.rst", "auto_tutorial/plot_fbegin_investigate.rst", "auto_tutorial/plot_gbegin_cst.rst", "auto_tutorial/plot_gbegin_dataframe.rst", "auto_tutorial/plot_gbegin_transfer_learning.rst", "auto_tutorial/plot_gconverting.rst", "auto_tutorial/plot_gexternal_catboost.rst", "auto_tutorial/plot_gexternal_lightgbm.rst", "auto_tutorial/plot_gexternal_lightgbm_reg.rst", "auto_tutorial/plot_gexternal_xgboost.rst", "auto_tutorial/plot_icustom_converter.rst", "auto_tutorial/plot_jcustom_syntax.rst", "auto_tutorial/plot_kcustom_converter_wrapper.rst", "auto_tutorial/plot_lcustom_options.rst", "auto_tutorial/plot_mcustom_parser.rst", "auto_tutorial/plot_ngrams.rst", "auto_tutorial/plot_pextend_python_runtime.rst", "auto_tutorial/plot_qextend_onnxruntime.rst", "auto_tutorial/plot_transformer_discrepancy.rst", "auto_tutorial/plot_usparse_xgboost.rst", "auto_tutorial/plot_wext_pyod_forest.rst", "auto_tutorial/plot_woe_transformer.rst", "auto_tutorial/sg_execution_times.rst", "index.rst", "index_tutorial.rst", "introduction.rst", "parameterized.rst", "pipeline.rst", "supported.rst", "tutorial_1-5_external.rst", 
"tutorial_1_simple.rst", "tutorial_2-5_extlib.rst", "tutorial_2_new_converter.rst", "tutorial_3_new_operator.rst", "tutorial_4_advanced.rst"], "titles": ["API Summary", "Gallery of examples", "ONNX Runtime Backend for ONNX", "Compare CDist with scipy", "Benchmark a pipeline", "Convert a model with a reduced list of operators", "Discrepencies with StandardScaler", "Convert a pipeline with ColumnTransformer", "Probabilities or raw scores", "Train, convert and predict a model", "Different ways to convert a model", "Probabilities as a vector or as a ZipMap", "Write your own converter for your own model", "When a custom model is neither a classifier nor a regressor", "When a custom model is neither a classifier nor a regressor (alternative)", "Errors with onnxruntime", "Discrepencies with GaussianProcessorRegressor: use of double", "Walk through intermediate outputs", "Investigate a pipeline", "Logging, verbose", "Metadata", "Custom Operator for NMF Decomposition", "Play with ONNX operators", "Draw a pipeline", "Convert a pipeline with a LightGbm model", "Convert a pipeline with a XGBoost model", "TfIdfVectorizer with ONNX", "Computation times", "Examples", "Train and deploy a scikit-learn pipeline", "Benchmark ONNX conversion", "Converter for WOEEncoder from categorical_encoder", "What is the opset number?", "One model, many possible conversions with options", "Black list operators when converting", "Choose appropriate output of a classifier", "Issues when switching to float", "Intermediate results and investigation", "Store arrays in one onnx graph", "Dataframe as an input", "Transfer Learning with ONNX", "Modify the ONNX graph", "Convert a pipeline with a CatBoost classifier", "Convert a pipeline with a LightGBM classifier", "Convert a pipeline with a LightGBM regressor", "Convert a pipeline with a XGBoost model", "Implement a new converter", "Two ways to implement a converter", "Implement a new converter using other converters", "A new converter with options", "Change the number of outputs by adding a parser", "Tricky issue when converting CountVectorizer or TfidfVectorizer", "Fast design with a python runtime", "Fast runtime with onnxruntime", "Dealing with discrepancies (tf-idf)", "TfIdf and sparse matrices", "Converter for pyod.models.iforest.IForest", "Converter for WOE", "Computation times", "sklearn-onnx: Convert your scikit-learn model into ONNX", "Tutorial", "Introduction", "Converters with options", "Convert a pipeline", "Supported scikit-learn Models", "Using converters from other libraries", "The easy case", "Write converters for other libraries", "A custom converter for a custom model", "Extend ONNX, extend runtime", "Advanced scenarios"], "terms": {"public": 0, "function": [0, 3, 10, 12, 13, 14, 16, 21, 22, 25, 30, 33, 36, 37, 38, 39, 41, 42, 45, 46, 47, 48, 52, 53, 56, 59, 61, 62, 63, 64, 68], "class": [0, 4, 10, 12, 13, 14, 15, 16, 18, 19, 22, 25, 26, 31, 35, 36, 37, 46, 47, 48, 49, 50, 51, 52, 55, 56, 57, 63, 64, 68], "expos": [0, 35], "scikit": [0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 30, 31, 32, 33, 35, 36, 37, 38, 40, 42, 43, 45, 46, 48, 51, 55, 56, 58, 60, 61, 62, 63, 66, 67, 68], "skl2onnx": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 59, 60, 61, 62, 63, 64, 68], "get_latest_tested_opset_vers": 0, "sourc": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 59, 62, 63, 64], "thi": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 50, 51, 52, 53, 54, 55, 56, 59, 61, 62, 63, 64, 68, 69, 70], "modul": [0, 4, 9, 18, 25, 29, 33, 45, 61], "reli": [0, 22, 29], "onnxruntim": [0, 1, 2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 69], "test": [0, 2, 7, 12, 22, 26, 30, 31, 32, 46, 48, 51, 52, 53, 59, 60, 63, 64], "everi": [0, 4, 6, 7, 12, 13, 14, 16, 17, 18, 22, 25, 31, 32, 33, 34, 35, 36, 38, 40, 44, 45, 46, 47, 48, 51, 52, 56, 57, 59, 62, 63, 64, 66, 67, 68, 69], "The": [0, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 24, 25, 26, 29, 30, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52, 53, 54, 55, 56, 57, 59, 60, 61, 62, 63, 64, 68], "return": [0, 3, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 21, 22, 26, 29, 31, 32, 33, 35, 36, 37, 40, 41, 42, 46, 47, 48, 49, 50, 52, 54, 55, 56, 57, 59, 63, 64], "most": [0, 7, 17, 22, 23, 32, 34, 36, 46, 47, 56, 59, 62, 63, 64], "recent": [0, 7, 32, 42, 45, 59], "target": [0, 2, 4, 8, 9, 11, 12, 13, 14, 15, 16, 18, 19, 24, 25, 26, 31, 32, 33, 35, 38, 41, 42, 43, 45, 46, 48, 55, 59, 61, 64], "opset": [0, 19, 28, 36, 42, 46, 47, 48, 52, 58, 59, 60, 64, 66], "specifi": [0, 12, 13, 14, 16, 32, 33, 41, 52, 59, 62, 64], "packag": [0, 2, 22, 23, 44, 47, 52, 55, 59, 63, 64, 67, 68], "one": [0, 3, 4, 5, 7, 10, 11, 12, 13, 14, 16, 17, 18, 19, 22, 23, 26, 28, 30, 31, 32, 33, 35, 37, 39, 42, 44, 46, 47, 50, 52, 53, 54, 55, 57, 58, 59, 60, 61, 62, 63, 64, 66], "i": [0, 1, 2, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 66, 68, 69], "lower": [0, 16, 64], "def": [0, 3, 6, 7, 10, 12, 13, 14, 17, 21, 22, 26, 29, 31, 32, 33, 36, 40, 41, 42, 44, 46, 47, 48, 49, 50, 52, 54, 55, 56], "onnx_opset_vers": [0, 32, 59], "both": [0, 17, 29, 36, 46, 49, 50, 62, 64], "learn": [0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 30, 31, 32, 33, 35, 36, 37, 38, 42, 43, 45, 46, 48, 51, 52, 55, 56, 58, 60, 61, 62, 63, 66, 67, 68], "model": [0, 1, 2, 4, 6, 7, 15, 17, 18, 20, 22, 26, 27, 28, 30, 31, 32, 34, 36, 37, 42, 43, 44, 50, 52, 53, 54, 55, 58, 60, 62, 63, 65, 66, 67, 69], "first": [0, 2, 5, 10, 11, 12, 14, 15, 17, 21, 22, 26, 29, 33, 34, 35, 36, 37, 40, 46, 47, 48, 51, 52, 56, 57, 61, 62, 63, 64], "let": [0, 2, 3, 4, 5, 6, 7, 8, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25, 26, 31, 32, 33, 34, 36, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 55, 61, 62, 63, 64], "user": [0, 5, 7, 21, 33, 37, 38, 41, 54, 62, 63, 64], "manual": [0, 62, 64], "defin": [0, 13, 14, 16, 22, 26, 32, 46, 47, 48, 50, 52, 56, 57, 61, 63, 64, 69], "input": [0, 3, 4, 6, 10, 12, 13, 14, 15, 16, 18, 19, 21, 22, 23, 24, 25, 26, 28, 29, 31, 32, 33, 37, 42, 43, 45, 46, 47, 48, 49, 50, 52, 55, 56, 58, 60, 61, 62, 63, 64, 66], "": [0, 2, 3, 4, 5, 6, 7, 8, 10, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 30, 31, 32, 33, 34, 36, 39, 40, 
42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 55, 60, 61, 62, 63, 64, 69], "name": [0, 2, 3, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 19, 21, 22, 23, 24, 25, 26, 32, 33, 35, 37, 38, 39, 40, 42, 45, 47, 52, 57, 59, 60, 61, 62, 63, 64], "type": [0, 3, 4, 7, 10, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 29, 31, 32, 35, 36, 37, 39, 40, 41, 42, 46, 47, 48, 49, 50, 52, 56, 61, 62, 63, 64, 68], "second": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 61, 62, 64], "infer": [0, 29, 39, 52, 61, 63, 64], "inform": [0, 11, 18, 19, 22, 35, 38, 46, 48, 61, 63, 64], "from": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29, 30, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 67, 68], "train": [0, 1, 12, 13, 14, 15, 20, 21, 27, 28, 31, 36, 40, 46, 52, 54, 58, 59, 60, 63, 64, 66], "data": [0, 2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 24, 25, 26, 29, 30, 31, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 52, 56, 59, 61, 63, 64], "These": [0, 62, 64], "two": [0, 12, 13, 14, 16, 21, 22, 26, 28, 30, 32, 33, 36, 37, 44, 46, 48, 49, 52, 55, 57, 58, 60, 61, 62, 63, 64, 68, 69], "ar": [0, 4, 7, 11, 12, 13, 14, 16, 17, 22, 26, 29, 30, 32, 33, 35, 36, 37, 39, 40, 46, 47, 48, 49, 50, 51, 52, 55, 59, 61, 62, 63, 64, 65, 69], "main": [0, 32, 40, 42, 64], "entri": [0, 64], "point": [0, 12, 40, 64], "rest": [0, 64], "need": [0, 3, 4, 6, 7, 10, 11, 12, 13, 14, 16, 17, 18, 21, 25, 29, 30, 31, 32, 33, 36, 37, 39, 40, 42, 45, 46, 47, 48, 49, 50, 52, 53, 55, 56, 61, 62, 63, 64, 68, 69], "ha": [0, 7, 16, 17, 22, 33, 37, 40, 42, 46, 48, 50, 59, 62, 63, 64, 67], "implement": [0, 2, 4, 5, 7, 8, 10, 13, 14, 17, 18, 19, 21, 22, 24, 25, 26, 28, 30, 31, 32, 33, 34, 36, 42, 43, 44, 45, 49, 50, 52, 53, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69], "A": [0, 6, 8, 9, 11, 13, 14, 16, 19, 28, 29, 32, 33, 35, 36, 40, 41, 44, 45, 54, 55, 58, 60, 63, 64, 69], "whether": [0, 21, 30, 64], "import": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 56, 57, 59, 60, 61, 63, 68], "anoth": [0, 6, 13, 14, 25, 37, 38, 40, 46, 48, 56, 61, 64, 68], "creat": [0, 2, 3, 8, 13, 14, 16, 19, 21, 22, 36, 50, 52, 55, 61, 62, 63, 64, 68, 69], "scratch": 0, "convert_sklearn": [0, 4, 7, 8, 9, 10, 11, 12, 16, 17, 18, 19, 24, 25, 26, 42, 43, 45, 59, 62, 63], "none": [0, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 59, 61, 62, 63, 64], "initial_typ": [0, 4, 8, 9, 10, 11, 16, 18, 19, 26, 35, 39, 41, 45, 55, 56, 59, 62, 63, 64], "doc_str": [0, 17, 19, 20, 32], "target_opset": [0, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 34, 35, 36, 37, 38, 41, 42, 43, 44, 45, 46, 48, 49, 50, 52, 55, 56, 59, 64], "custom_conversion_funct": 0, "custom_shape_calcul": 0, "custom_pars": [0, 13, 14, 31, 42, 50, 56, 63], "intermedi": [0, 1, 12, 13, 27, 28, 38, 58, 60, 63, 64, 66], "fals": [0, 7, 11, 12, 14, 19, 24, 25, 29, 31, 33, 34, 37, 38, 39, 41, 42, 43, 45, 46, 48, 49, 50, 52, 55, 57, 62, 63, 64], "white_op": [0, 
64], "black_op": [0, 5, 34, 64], "final_typ": [0, 41, 64], "dtype": [0, 2, 3, 6, 7, 12, 14, 15, 16, 17, 18, 21, 22, 23, 26, 31, 33, 35, 36, 37, 38, 39, 40, 41, 46, 49, 50, 52, 54, 56, 63, 64], "model_optim": 0, "true": [0, 5, 6, 7, 8, 12, 17, 19, 24, 25, 29, 30, 31, 33, 34, 36, 40, 42, 43, 45, 46, 47, 49, 50, 52, 55, 56, 57, 62, 63, 64], "verbos": [0, 1, 22, 27, 37, 47, 56, 64], "0": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 68], "produc": [0, 4, 5, 7, 11, 12, 13, 14, 15, 17, 18, 20, 26, 31, 33, 34, 35, 36, 37, 38, 40, 50, 51, 52, 54, 62, 63, 64, 66], "an": [0, 2, 4, 5, 7, 8, 10, 12, 13, 14, 15, 16, 18, 19, 21, 22, 23, 25, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 42, 44, 46, 47, 48, 49, 52, 56, 57, 58, 59, 60, 61, 62, 63, 64, 66, 68], "equival": [0, 6, 21, 35, 46, 51, 54, 55, 63, 64], "given": [0, 7, 13, 14, 17, 22, 63, 64], "support": [0, 6, 7, 17, 26, 29, 32, 33, 39, 40, 49, 52, 55, 59, 62, 63, 69], "supported_convert": 0, "For": [0, 64], "pipelin": [0, 1, 9, 12, 13, 14, 19, 27, 28, 39, 46, 48, 54, 55, 56, 58, 59, 60, 61, 65, 66, 68], "convers": [0, 6, 7, 11, 16, 17, 25, 28, 29, 31, 36, 45, 51, 52, 55, 56, 58, 60, 62, 64, 66], "make": [0, 2, 6, 22, 26, 33, 46, 48, 63, 64], "sure": [0, 46, 48], "each": [0, 4, 7, 11, 12, 18, 30, 35, 63, 64, 66], "compon": [0, 4, 18, 40, 46, 63, 64], "our": [0, 64, 65], "item": [0, 12, 31, 33, 64], "its": [0, 3, 5, 11, 12, 22, 23, 33, 34, 35, 44, 51, 52, 62, 63, 64, 67, 68], "counterpart": 0, "note": [0, 62, 64], "all": [0, 1, 5, 12, 15, 18, 28, 31, 33, 36, 39, 52, 54, 55, 56, 57, 59, 62, 63, 64], "initi": [0, 16, 17, 21, 23, 32, 36, 40, 42, 61, 63, 64], "requir": [0, 6, 7, 12, 16, 21, 22, 24, 26, 30, 32, 40, 45, 46, 48, 52, 53, 61, 62, 63, 64, 69], "can": [0, 2, 7, 10, 12, 13, 14, 15, 17, 18, 22, 24, 25, 26, 29, 31, 32, 33, 35, 36, 37, 38, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 51, 52, 53, 56, 57, 59, 61, 62, 63, 64, 65, 66, 68, 69], "also": [0, 2, 4, 9, 18, 19, 22, 32, 33, 36, 38, 39, 52, 54, 55, 56, 61, 62, 63, 64], "paramet": [0, 5, 6, 10, 12, 13, 14, 16, 31, 32, 34, 41, 44, 47, 52, 59, 62, 64], "python": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24, 25, 26, 28, 30, 31, 32, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 54, 55, 56, 57, 58, 60, 62, 64, 69], "list": [0, 1, 7, 8, 11, 17, 22, 26, 27, 28, 32, 35, 39, 40, 44, 47, 52, 55, 58, 60, 62, 63, 64, 66, 69, 70], "element": [0, 63, 64], "tupl": [0, 51, 64], "variabl": [0, 17, 19, 26, 29, 30, 33, 37, 46, 48, 63, 64], "data_typ": [0, 3, 4, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 21, 23, 24, 25, 26, 31, 32, 35, 37, 41, 42, 43, 45, 46, 47, 49, 50, 52, 55, 56, 59, 61, 63], "py": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 63], "graphproto": [0, 22], "modelproto": [0, 22, 23, 37], "string": [0, 3, 4, 7, 11, 17, 18, 19, 22, 26, 32, 33, 35, 39, 51, 52, 54, 61, 62, 64], "attach": [0, 69], "onto": 0, "number": [0, 3, 4, 5, 9, 11, 12, 13, 14, 15, 18, 28, 30, 31, 33, 34, 35, 40, 44, 46, 48, 49, 52, 56, 58, 59, 60, 61, 62, 63, 64, 66, 68], "exampl": [0, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24, 25, 26, 29, 30, 32, 33, 34, 35, 36, 37, 38, 
39, 40, 42, 43, 44, 46, 47, 48, 49, 50, 53, 54, 55, 56, 59, 60, 61, 62, 64, 65, 67, 68, 70], "7": [0, 2, 4, 6, 8, 15, 17, 18, 19, 21, 22, 23, 24, 25, 30, 31, 32, 35, 37, 38, 40, 44, 48, 51, 55, 57, 60, 64], "1": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 60, 62, 63, 64, 68], "2": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 54, 55, 56, 57, 60, 62, 63, 64], "8": [0, 2, 3, 4, 13, 14, 15, 17, 18, 19, 22, 26, 30, 32, 35, 37, 38, 39, 40, 44, 51, 56, 57, 63, 64], "3": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 30, 31, 32, 33, 34, 35, 36, 37, 39, 40, 41, 42, 43, 44, 45, 46, 48, 51, 54, 55, 57, 60, 63, 64], "valu": [0, 4, 7, 11, 12, 16, 17, 18, 22, 33, 35, 37, 39, 40, 46, 47, 52, 55, 59, 62, 64], "choos": [0, 28, 52, 58, 59, 60, 64, 66], "latest": [0, 32], "see": [0, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 20, 24, 25, 26, 33, 34, 35, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 61, 62, 63, 64], "dictionari": [0, 7, 8, 11, 17, 33, 35, 39, 62, 64], "custom": [0, 1, 17, 19, 22, 25, 26, 27, 50, 52, 55, 60, 62, 63, 64, 69], "take": [0, 15, 21, 26, 30, 31, 52, 64], "preced": 0, "over": [0, 64], "shape": [0, 3, 4, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 30, 31, 32, 35, 36, 37, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 54, 55, 56, 61, 64, 68], "calcul": [0, 12, 13, 14, 24, 25, 43, 44, 45, 46, 47, 48, 56, 64, 68], "determin": [0, 36, 64], "which": [0, 4, 7, 8, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 26, 31, 33, 36, 38, 46, 47, 48, 49, 50, 51, 54, 55, 59, 60, 61, 62, 63, 64], "output": [0, 1, 3, 6, 7, 10, 12, 13, 14, 15, 16, 18, 19, 21, 22, 23, 27, 28, 31, 32, 33, 36, 37, 40, 42, 44, 46, 47, 48, 49, 52, 54, 56, 57, 58, 60, 62, 63, 64, 66, 68], "expect": [0, 13, 14, 15, 25, 40, 44, 45, 46, 47, 48, 50, 52, 56, 63, 64], "particular": [0, 3, 16, 64], "task": 0, "default": [0, 6, 8, 11, 12, 16, 25, 26, 32, 33, 41, 45, 46, 49, 50, 51, 59, 61, 62, 63, 64], "classifi": [0, 1, 7, 8, 11, 12, 17, 18, 26, 27, 28, 29, 33, 46, 50, 58, 60, 63, 64, 65, 66], "regressor": [0, 1, 12, 27, 28, 40, 46, 50, 58, 60, 64, 65], "thei": [0, 4, 7, 30, 34, 35, 55, 64], "rewritten": [0, 48, 61], "fct_parser": 0, "specif": [0, 6, 7, 12, 17, 20, 22, 32, 33, 40, 46, 47, 48, 52, 55, 59, 63, 64, 66, 69], "instanc": [0, 12, 20, 62, 64], "us": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 44, 45, 46, 47, 49, 50, 51, 52, 55, 56, 57, 58, 59, 60, 62, 63, 64, 66, 68, 69], "otherwis": [0, 3, 64], "white": [0, 5, 34, 40, 64], "allow": [0, 12, 36, 38, 52, 64], "while": [0, 31, 33, 41, 64], "empti": [0, 16, 26, 52, 54, 62, 64], "black": [0, 5, 28, 58, 60, 64, 66], "blacklist": [0, 5, 34, 64], "work": [0, 7, 10, 12, 26, 29, 31, 33, 36, 40, 46, 47, 48, 50, 52, 62, 64], "same": [0, 2, 4, 7, 11, 12, 13, 14, 15, 17, 26, 29, 30, 31, 33, 35, 36, 39, 44, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 57, 62, 63, 64, 68], "wai": [0, 1, 5, 7, 12, 16, 17, 21, 22, 23, 27, 28, 29, 33, 36, 37, 50, 55, 58, 59, 60, 61, 62, 63, 64, 68], "mandatori": [0, 52, 61, 64, 69], "overwrit": [0, 37, 52, 64], "remov": [0, 4, 6, 7, 11, 17, 18, 26, 33, 35, 36, 37, 38, 62, 64], "5": [0, 2, 
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 55, 57, 60, 64], "now": [0, 12, 13, 14, 16, 35, 36, 52, 61, 64], "mai": [0, 5, 6, 7, 8, 11, 13, 14, 16, 19, 21, 25, 29, 30, 32, 33, 35, 36, 37, 39, 44, 45, 46, 47, 48, 52, 54, 55, 56, 57, 62, 63, 64, 68, 69, 70], "add": [0, 5, 12, 13, 14, 19, 22, 23, 31, 32, 33, 34, 37, 42, 46, 48, 64, 68], "oper": [0, 1, 3, 4, 10, 11, 12, 13, 14, 16, 17, 18, 19, 27, 28, 31, 32, 33, 35, 36, 37, 42, 44, 46, 47, 48, 49, 50, 53, 56, 58, 59, 60, 61, 62, 63, 66, 69], "cast": [0, 6, 12, 17, 19, 31, 32, 33, 36, 55, 63, 64], "switch": [0, 2, 28, 44, 45, 58, 60, 62, 66], "doubl": [0, 1, 6, 7, 15, 17, 27, 29, 36, 44, 46, 47, 48, 52, 56, 64], "when": [0, 1, 7, 11, 17, 19, 20, 27, 28, 35, 37, 44, 45, 47, 55, 58, 59, 60, 63, 64, 65, 66, 68, 70], "necessari": [0, 12, 62, 64], "want": [0, 5, 36, 37, 52, 63, 64], "chang": [0, 3, 4, 5, 6, 7, 8, 16, 17, 18, 28, 32, 34, 36, 40, 47, 52, 58, 60, 62, 63, 64, 68], "prefix": [0, 12, 26, 41, 52, 63, 64], "signatur": [0, 12, 13, 14, 46, 48], "follow": [0, 3, 4, 6, 9, 12, 15, 16, 18, 21, 25, 26, 32, 33, 36, 38, 39, 40, 42, 44, 45, 46, 48, 50, 51, 55, 59, 60, 61, 62, 63, 64, 65, 67, 68], "get_nam": 0, "existing_nam": [0, 41], "librari": [0, 13, 14, 24, 25, 32, 38, 40, 42, 43, 44, 45, 46, 47, 48, 56, 59, 60, 62, 63, 66, 68], "check": [0, 4, 7, 10, 12, 13, 14, 16, 22, 31, 37, 40, 46, 47, 48, 51, 57, 59, 64], "uniqu": [0, 12, 32, 41, 46, 47, 48, 52, 56, 61, 63, 64], "modifi": [0, 4, 17, 18, 28, 35, 58, 60, 63, 66], "enabl": [0, 19, 33, 59, 62, 64], "disabl": [0, 16, 19, 64], "optimis": [0, 11, 35], "after": [0, 7, 17, 44, 61, 63, 64], "wa": [0, 2, 7, 12, 17, 20, 25, 32, 36, 37, 38, 44, 45, 51, 52, 55, 59, 60, 61, 63, 64], "reduc": [0, 1, 26, 27, 40, 44, 52, 64], "ident": [0, 21, 46, 47, 48, 49, 50, 52, 64], "displai": [0, 5, 23, 33, 34, 39, 64, 68], "progress": [0, 26, 62], "assum": [0, 6, 16, 21, 26, 36, 44, 46, 50, 61, 64, 68, 69], "heterogen": [0, 26, 64], "If": [0, 13, 14, 16, 19, 22, 25, 37, 44, 46, 48, 51, 52, 55, 56, 62, 63, 64, 68], "float": [0, 6, 7, 13, 15, 16, 17, 19, 22, 28, 29, 31, 32, 39, 40, 44, 45, 46, 47, 52, 56, 58, 60, 64, 66], "last": [0, 7, 17, 32, 33, 37, 38, 40, 52, 59, 62, 63, 64, 69], "10": [0, 3, 4, 6, 8, 10, 11, 12, 16, 18, 22, 26, 27, 30, 32, 35, 36, 37, 40, 44, 45, 46, 49, 51, 52, 55, 56, 57, 60, 63, 64], "integ": [0, 11, 31, 33, 35, 36, 39, 57, 62, 64], "we": [0, 3, 4, 7, 12, 13, 14, 16, 17, 18, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 36, 37, 38, 39, 40, 42, 44, 45, 46, 47, 48, 49, 52, 54, 56, 61, 63, 64, 65], "below": [0, 4, 7, 17, 18, 32, 36, 46, 48, 59, 62, 64], "indic": [0, 13, 14, 15, 16, 21, 61, 64], "batch": [0, 30, 46, 47, 61, 64], "size": [0, 12, 30, 40, 54, 64], "here": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 61, 64], "unknown": [0, 22, 46, 47, 52, 61, 64], "common": [0, 3, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 19, 21, 24, 25, 26, 31, 32, 33, 35, 36, 37, 40, 41, 42, 43, 44, 45, 46, 47, 49, 50, 52, 55, 56, 59, 61, 63, 64], "floattensortyp": [0, 3, 4, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 21, 24, 25, 31, 35, 37, 39, 41, 42, 43, 45, 55, 56, 59, 61, 63], "int64tensortyp": [0, 7, 13, 14, 17, 19, 41, 42, 56], "float_input": [0, 8, 9, 11, 19, 35, 45, 56, 59, 61], "int64_input": 0, "includ": [0, 
12, 23, 24, 25, 31, 36, 42, 43, 45, 52, 53, 63, 64], "columntransform": [0, 1, 17, 26, 27, 39, 54, 55, 63, 64], "column": [0, 3, 7, 11, 17, 24, 25, 26, 33, 39, 40, 42, 43, 45, 46, 48, 51, 54, 55, 56, 57, 61, 63, 64], "sklearn": [0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 60, 61, 63, 65, 67, 68], "featur": [0, 7, 12, 17, 26, 31, 36, 46, 47, 48, 49, 50, 52, 57, 61, 64], "could": [0, 7, 17, 36, 38, 40, 48, 57, 63, 64, 69], "differ": [0, 1, 5, 6, 7, 16, 17, 23, 27, 29, 33, 34, 35, 36, 37, 39, 40, 45, 46, 47, 48, 49, 54, 55, 62, 63, 64, 69], "onli": [0, 5, 12, 15, 24, 25, 26, 31, 32, 34, 36, 38, 39, 42, 43, 45, 50, 51, 52, 57, 59, 62, 64, 67, 68], "some": [0, 5, 10, 13, 14, 16, 22, 34, 36, 37, 45, 55, 62, 63, 64, 65], "cannot": [0, 3, 4, 7, 15, 17, 18, 32, 45, 51, 54, 63, 64], "guess": [0, 52, 64], "raw": [0, 1, 7, 12, 17, 27, 33, 40, 64], "usual": [0, 6, 8, 9, 11, 13, 14, 19, 22, 25, 32, 33, 35, 36, 37, 39, 46, 47, 48, 52, 55, 56, 61, 62, 63, 64, 68], "suggest": [0, 16, 64], "have": [0, 12, 16, 32, 33, 39, 46, 47, 48, 56, 57, 62, 64, 67, 68], "them": [0, 5, 16, 34, 37, 38, 46, 57, 62, 63, 64], "obviou": 0, "do": [0, 5, 7, 12, 16, 17, 19, 20, 24, 25, 32, 34, 36, 37, 40, 42, 43, 44, 45, 48, 52, 53, 55, 62, 63, 64], "That": [0, 7, 11, 12, 13, 14, 16, 23, 25, 30, 31, 32, 33, 35, 36, 37, 39, 45, 50, 52, 61, 62, 63, 64, 66, 69], "why": [0, 7, 12, 25, 26, 29, 30, 36, 37, 44, 45, 61, 63], "model_typ": 0, "model_id": 0, "sep": [0, 26, 62], "delimit": 0, "between": [0, 2, 3, 12, 51, 63, 64], "word": [0, 62, 64], "token": [0, 26, 51, 62, 64], "short": [0, 31, 64], "It": [0, 7, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 25, 26, 29, 31, 32, 33, 36, 37, 38, 39, 40, 41, 44, 45, 46, 47, 48, 50, 52, 54, 55, 56, 57, 59, 61, 62, 63, 64, 67, 68, 69], "overwritten": 0, "extra": [0, 16, 64], "tfidfvector": [0, 1, 27, 28, 33, 55, 58, 60, 64, 70], "separ": [0, 18, 26, 33, 38, 62, 64], "model_onnx": [0, 4, 5, 7, 12, 13, 14, 17, 18, 21, 24, 25, 26, 34, 40, 42, 43, 44, 45, 55, 62, 63], "tfidf": [0, 26, 28, 54, 58, 60, 62, 64, 70], "stringtensortyp": [0, 7, 17, 26, 39, 55, 61, 62, 63], "But": [0, 22, 33, 39, 46, 47, 56, 57], "possibl": [0, 19, 22, 28, 31, 34, 38, 39, 41, 58, 60, 61, 62, 63, 64, 66, 69], "distinguish": 0, "id": [0, 5, 7, 8, 11, 33, 34, 35, 46, 62, 64], "ad": [0, 10, 28, 32, 33, 35, 44, 46, 47, 48, 49, 51, 52, 55, 58, 60, 62, 64, 67, 68, 69], "to_onnx": [0, 2, 3, 5, 6, 10, 13, 14, 15, 21, 22, 23, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 41, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 62, 64, 68], "x": [0, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 19, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 59, 61, 64, 68], "call": [0, 10, 12, 14, 16, 19, 32, 37, 48, 52, 55, 61, 62, 63, 64], "simplifi": [0, 63], "set": [0, 12, 19, 33, 35, 39, 42, 54, 61, 63, 64, 69], "must": [0, 4, 6, 7, 13, 14, 17, 18, 25, 32, 41, 46, 48, 52, 56, 63, 64, 68, 69], "inherit": [0, 10, 64], "onnxoperatormixin": [0, 10, 52, 64], "method": [0, 4, 8, 12, 13, 14, 16, 18, 37, 52, 62, 63, 64], "case": [0, 5, 7, 16, 21, 31, 32, 34, 36, 39, 40, 45, 46, 47, 48, 50, 56, 57, 60, 61, 62, 63, 64], "fail": [0, 4, 5, 9, 13, 14, 15, 16, 17, 18, 19, 24, 25, 33, 34, 36, 37, 45, 55, 56, 63, 64], "object": [0, 7, 12, 17, 19, 26, 36, 39, 40, 42, 45, 63], "without": [0, 2, 19, 26, 35, 61, 64, 66], "ani": [0, 
12, 13, 14, 15, 19, 31, 33, 40, 46, 47, 52, 62, 63, 64], "associ": [0, 13, 14, 19, 24, 25, 33, 42, 43, 45, 46, 48, 51, 57, 64, 68], "map": [0, 11, 19, 31, 35, 42, 51, 62, 63, 64], "error": [0, 1, 5, 16, 18, 19, 25, 27, 29, 32, 34, 45, 46, 48, 51, 55, 63, 64, 68], "messag": [0, 19, 25, 35, 45, 64, 68], "explicit": [0, 6, 19, 55, 64], "enough": [0, 19, 22, 55, 62], "logger": [0, 19], "getlogg": [0, 19], "setlevel": [0, 19], "debug": [0, 19, 37], "basicconfig": [0, 19], "level": [0, 19, 61], "illustr": [0, 64], "what": [0, 5, 17, 19, 28, 33, 34, 35, 39, 46, 47, 48, 49, 50, 58, 60, 62, 63, 66], "look": [0, 4, 7, 13, 14, 15, 16, 17, 18, 21, 22, 23, 29, 31, 36, 39, 50, 55, 59, 64, 68], "like": [0, 13, 14, 17, 21, 38, 50, 64], "section": [0, 7, 12, 38, 68], "cover": [0, 13, 14, 25, 46, 48, 56, 68], "avail": [0, 5, 7, 17, 22, 30, 32, 34, 35, 59, 63, 65], "from_sklearn": 0, "To": [0, 36, 44, 48, 64, 68], "find": [0, 13, 14, 16, 17, 18, 25, 46, 48, 52, 56, 63, 64, 68], "get": [0, 12, 13, 14, 25, 26, 35, 37, 42, 46, 48, 54, 56, 62, 63, 64, 68], "retriev": [0, 17, 33, 38, 46, 47, 48, 49, 52, 56, 61, 64], "whose": [0, 22, 64], "update_registered_convert": [0, 12, 13, 14, 24, 25, 31, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52, 55, 56, 63, 68], "alia": [0, 12, 13, 14, 19, 31, 42, 50, 56, 63], "shape_fct": 0, "convert_fct": 0, "updat": [0, 19, 32, 63, 64], "so": [0, 4, 7, 13, 14, 17, 21, 24, 25, 33, 42, 43, 45, 46, 48, 56, 64, 68], "insert": [0, 6, 40, 45, 55, 64], "should": [0, 6, 10, 29, 33, 36, 39, 45, 46, 47, 50, 56, 59, 62, 63, 64], "fast": [0, 28, 58, 60, 64, 69], "whole": [0, 12, 24, 25, 33, 42, 43, 45, 63], "comput": [0, 2, 3, 6, 7, 16, 21, 22, 29, 30, 31, 33, 36, 37, 44, 46, 47, 48, 49, 50, 52, 56, 59, 61, 62, 63, 64, 66], "parallel": [0, 30, 56, 64], "rais": [0, 5, 10, 12, 13, 14, 15, 25, 29, 31, 33, 34, 40, 42, 46, 47, 48, 49, 50, 52, 54, 56, 63, 64, 68], "except": [0, 4, 5, 7, 13, 14, 15, 16, 17, 24, 25, 26, 32, 34, 39, 40, 45, 46, 48, 51, 55, 56, 63, 64, 68], "alreadi": [0, 40, 48, 63], "exist": [0, 12, 13, 21, 22, 25, 40, 45, 48, 51, 52, 59, 62, 63, 64, 66, 69], "well": [0, 16, 29, 32, 41, 46, 47, 48, 50, 64], "shape_calcul": [0, 10, 24, 25, 42, 43, 44, 45, 55, 63], "calculate_linear_classifier_output_shap": [0, 24, 25, 42, 43, 45, 55, 63], "operator_convert": [0, 24, 25, 43, 44, 45, 55, 62, 63], "randomforest": [0, 63], "convert_sklearn_random_forest_classifi": [0, 63], "sgdclassifi": [0, 33, 63, 64], "sklearnlinearclassifi": [0, 41, 63], "zipmap": [0, 1, 7, 14, 17, 19, 24, 25, 27, 38, 39, 41, 42, 43, 45, 55, 64], "output_class_label": [0, 33], "raw_scor": [0, 8, 62], "doe": [0, 4, 6, 7, 12, 13, 14, 15, 17, 18, 21, 22, 26, 32, 34, 36, 39, 41, 42, 45, 47, 51, 52, 53, 55, 61, 62, 63, 64, 68], "declar": [0, 12, 13, 49, 50, 51, 52, 64], "automat": [0, 52, 57, 59], "handl": [0, 4, 7, 13, 15, 17, 18, 25, 31, 39, 45, 47, 62, 63, 64], "update_registered_pars": [0, 63], "parser_fct": 0, "parse_sklearn": 0, "helper": [0, 4, 17, 18, 22, 37, 38, 42, 63], "onnx_help": [0, 17, 38], "enumerate_model_node_output": [0, 17], "add_nod": [0, 12, 13, 42, 47], "enumer": [0, 4, 11, 18, 26, 35, 40, 64], "load_onnx_model": [0, 17], "onnx_file_or_byt": 0, "load": [0, 2, 4, 5, 7, 13, 14, 15, 17, 18, 20, 23, 24, 29, 30, 34, 36, 37, 39, 42, 43, 44, 46, 48, 51, 52], "file": [0, 16, 27, 40, 58, 64], "byte": [0, 15, 40], "select_model_inputs_output": [0, 17, 38], "unneed": [0, 38], "save_onnx_model": [0, 17], "filenam": [0, 23], "save": [0, 7, 12, 17, 24, 25, 26, 29, 42, 43, 45, 64], "_pars": [0, 35, 42], "deleg": 0, "noth": 
[0, 52, 64], "invok": [0, 39, 64], "correct": 0, "pars": [0, 19], "accord": [0, 64], "e": [0, 5, 7, 13, 14, 15, 16, 17, 24, 25, 32, 34, 36, 39, 40, 45, 46, 48, 51, 55, 56, 63, 64, 68], "g": [0, 54, 64], "onehotencod": [0, 7, 17, 39, 63, 64], "logisticregress": [0, 2, 4, 7, 8, 9, 11, 13, 14, 15, 17, 18, 26, 33, 35, 38, 41, 61, 62, 63, 64, 68], "parse_sklearn_model": [0, 19], "put": 0, "abstract": 0, "framework": [0, 2, 40, 64], "seamlessli": 0, "machin": [0, 10, 12, 22, 32, 52, 61, 63, 64, 66], "tool": [0, 5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 31, 57, 61, 63], "check_input_and_output_numb": [0, 31], "input_count_rang": [0, 31], "output_count_rang": [0, 31], "minim": 0, "maxim": [0, 64], "twice": [0, 16, 64], "infinit": [0, 57, 64], "rang": [0, 6, 12, 21, 31, 32, 36, 40, 44, 48, 54, 64], "infin": [0, 64], "you": [0, 12, 13, 14, 25, 26, 29, 30, 32, 37, 45, 46, 48, 51, 56, 59, 61, 62, 64, 68], "param": [0, 12, 45, 46, 47, 48, 49, 50, 52, 64], "format": [0, 2, 11, 12, 13, 14, 15, 20, 22, 29, 35, 40, 59, 64], "check_input_and_output_typ": 0, "good_input_typ": 0, "good_output_typ": 0, "mean": [0, 3, 10, 12, 13, 14, 19, 25, 33, 36, 38, 46, 47, 48, 49, 50, 52, 56, 57, 63, 64, 66, 68], "skip": [0, 8, 22, 64], "_contain": 0, "sklearnmodelcontainernod": 0, "sklearn_model": 0, "properti": 0, "input_nam": [0, 9, 10, 15, 19, 41, 45, 59, 61], "correspond": [0, 13, 14, 25, 46, 48, 54, 56, 63, 64, 68], "output_nam": [0, 3, 10, 14, 15, 21, 22, 23, 31, 36, 40, 41, 46, 48, 49, 50, 52, 56], "modelcomponentcontain": [0, 62], "registered_model": 0, "In": [0, 4, 5, 7, 13, 14, 17, 18, 19, 24, 29, 30, 33, 34, 36, 37, 39, 42, 43, 44, 45, 46, 47, 48, 50, 51, 52, 56, 57, 61, 64], "phase": [0, 64], "collect": [0, 63, 64], "materi": 0, "build": [0, 6, 26, 36, 40, 46, 63], "encapsul": 0, "add_initi": [0, 13, 47], "onnx_typ": 0, "content": [0, 23, 40, 64], "tensorproto": [0, 13, 14, 22, 56, 64], "final": [0, 4, 7, 10, 11, 12, 17, 18, 26, 29, 35, 40, 56, 64], "tensor": [0, 15, 40, 44, 64], "flatten": [0, 40, 64], "arrai": [0, 2, 3, 7, 12, 14, 15, 16, 18, 21, 22, 23, 26, 28, 31, 33, 35, 37, 39, 40, 41, 51, 52, 54, 55, 56, 58, 60, 63, 64, 66, 68], "add_input": 0, "_parser": 0, "op_typ": [0, 3, 17, 19, 21, 22, 23, 32, 42], "op_domain": [0, 12, 42], "op_vers": [0, 3, 10, 14, 21, 22, 23, 31, 42, 46, 48, 49, 50, 52, 56, 64], "attr": [0, 12], "nodeproto": [0, 22], "domain": [0, 3, 17, 19, 20, 21, 22, 23, 32, 36, 42, 52, 59, 64, 69], "found": [0, 13, 14, 16, 19, 22, 25, 45, 46, 48, 56, 59, 63, 64, 68], "pool": [0, 64], "conv": [0, 19, 64], "consid": [0, 5, 24, 25, 26, 32, 34, 42, 43, 45, 55, 64], "ai": [0, 3, 12, 17, 19, 21, 22, 23, 24, 25, 32, 36, 42, 43, 44, 45, 52, 55, 56, 64], "ml": [0, 4, 7, 12, 17, 18, 19, 21, 24, 25, 32, 36, 42, 43, 44, 45, 52, 55, 56, 59, 60, 63, 64, 69], "try": [0, 4, 5, 7, 13, 14, 15, 16, 17, 18, 22, 24, 25, 26, 29, 30, 32, 34, 36, 37, 39, 40, 42, 43, 44, 45, 46, 48, 49, 50, 51, 55, 56, 63, 64, 68], "kei": [0, 26, 36, 39, 64], "attribut": [0, 3, 12, 17, 18, 19, 22, 29, 32, 33, 42, 46, 47, 48, 49, 50, 52, 63, 64], "respect": [0, 64], "add_output": 0, "_topologi": [0, 32], "onnx_nam": [0, 12, 19], "raw_oper": [0, 12, 13, 14, 19, 31, 42, 44, 46, 47, 48, 49, 50, 52, 56], "scope_inst": 0, "raw_nam": 0, "hold": [0, 64], "provid": [0, 3, 4, 5, 6, 7, 8, 9, 37, 59, 61, 63, 64], "unus": [0, 7, 12], "get_unique_operator_nam": [0, 12, 13, 47], "seed": [0, 64], "base": [0, 6, 10, 12, 13, 14, 22, 26, 36, 46, 47, 48, 49, 50, 52, 61, 62, 64], "get_unique_variable_nam": [0, 13, 47], "renam": [0, 62], 
"default_batch_s": 0, "sklearnmodelcontain": 0, "fill": [0, 5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 57, 64], "being": [0, 13, 14, 19, 25, 26, 44, 46, 48, 56, 64, 68], "convert_topologi": [0, 19], "model_nam": [0, 40], "channel_first_input": 0, "remove_ident": 0, "go": [0, 64], "denot": [0, 36, 64], "would": [0, 12, 31, 33, 50, 52, 61, 62, 64, 67], "assign": [0, 64], "metadata": [1, 27], "draw": [1, 27, 38, 64], "onnx": [1, 5, 6, 8, 11, 15, 20, 27, 28, 31, 33, 34, 35, 36, 37, 42, 43, 44, 45, 53, 55, 56, 58, 60, 62, 63, 65, 66, 67, 68], "runtim": [1, 5, 16, 20, 27, 28, 32, 33, 34, 36, 40, 50, 58, 59, 60, 63, 64, 66], "backend": [1, 22, 27, 56, 59, 62, 63], "log": [1, 27, 32, 64], "probabl": [1, 2, 7, 13, 14, 15, 17, 19, 27, 33, 35, 36, 38, 39, 40, 41, 42, 46, 50, 54, 56, 62, 64], "score": [1, 26, 27, 32, 33, 37, 56, 64], "convert": [1, 15, 18, 21, 23, 26, 27, 28, 30, 32, 33, 36, 37, 39, 40, 50, 53, 54, 55, 58, 60, 66, 69, 70], "predict": [1, 2, 4, 6, 12, 13, 14, 15, 16, 18, 21, 22, 26, 27, 30, 32, 33, 36, 37, 38, 44, 46, 50, 52, 53, 56, 59, 61, 63, 64, 66], "investig": [1, 27, 28, 29, 58, 60, 66], "compar": [1, 12, 16, 27, 29, 30, 33, 36, 37, 49, 54, 55, 63], "cdist": [1, 27, 33, 62], "scipi": [1, 7, 17, 27, 60, 64], "lightgbm": [1, 27, 28, 54, 55, 56, 58, 59, 60, 63, 65], "vector": [1, 7, 15, 17, 27, 52, 57, 64], "benchmark": [1, 27, 28, 58, 60, 66], "xgboost": [1, 27, 28, 54, 55, 56, 58, 59, 60, 63, 65], "nmf": [1, 27, 52, 53, 64], "decomposit": [1, 4, 18, 26, 27, 40, 48, 63, 64], "discrep": [1, 26, 27, 28, 45, 51, 55, 58, 70], "standardscal": [1, 7, 12, 17, 24, 25, 27, 36, 37, 42, 43, 45, 55, 63, 64], "gaussianprocessorregressor": [1, 27], "plai": [1, 10, 14, 27, 64], "walk": [1, 27], "through": [1, 16, 26, 27, 37, 38, 62, 64], "neither": [1, 27, 64], "nor": [1, 27, 64], "altern": [1, 4, 9, 18, 27, 33], "write": [1, 2, 7, 9, 13, 14, 15, 17, 23, 24, 25, 26, 27, 31, 38, 40, 42, 43, 45, 47, 52, 55, 59, 60, 61, 64, 65, 66], "your": [1, 13, 14, 27], "own": [1, 13, 14, 22, 26, 27, 65, 67], "download": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57], "code": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 61, 63, 64, 66], "auto_examples_python": 1, "zip": [1, 7, 17, 22, 28, 40, 63, 64], "jupyt": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57], "notebook": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57], "auto_examples_jupyt": 1, "gener": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 64], "sphinx": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57], "click": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57], "full": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 63, 64], "extend": [2, 12, 16, 25, 32, 39, 40, 45, 60, 63, 64], "api": [2, 13, 14, 24, 25, 40, 42, 43, 45, 46, 48, 59, 63], "run": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 59, 63, 64], "simpl": [2, 6, 8, 12, 20, 23, 30, 32, 46, 48, 60, 64], "logist": [2, 4, 9, 15, 17, 18, 20, 33, 64], "regress": [2, 4, 9, 15, 18, 20, 29, 33, 36, 64], "dataset": [2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 41, 42, 43, 45, 46, 47, 48, 49, 50, 52, 59, 61], "load_iri": [2, 5, 8, 9, 11, 13, 14, 15, 19, 24, 25, 31, 33, 34, 35, 37, 38, 41, 42, 43, 45, 46, 47, 48, 49, 50, 52, 55, 59, 61], "linear_model": [2, 4, 7, 8, 9, 11, 13, 14, 15, 17, 18, 26, 29, 30, 33, 35, 38, 41, 61, 63, 64, 68], "numpi": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 59, 60, 61, 63, 64, 68], "get_devic": 2, "np": [2, 3, 4, 5, 6, 7, 10, 13, 14, 15, 17, 18, 21, 22, 26, 31, 56, 57, 64], "graph": [2, 4, 5, 6, 8, 16, 18, 19, 21, 23, 28, 29, 30, 32, 33, 34, 36, 44, 47, 49, 55, 58, 60, 62, 63, 64, 66, 68], "y": [2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 19, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52, 55, 59, 61, 64], "logreg": [2, 26], "c": [2, 7, 12, 13, 14, 17, 30, 38, 39, 53, 54, 55, 56, 63, 64], "1e5": 2, "fit": [2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 55, 56, 57, 59, 61, 63, 64], "astyp": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 24, 25, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52, 55, 56, 57, 59, 61], "float32": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52, 55, 56, 57, 59, 61, 63, 64], "logreg_iri": [2, 9, 15, 20, 61], "open": [2, 7, 9, 12, 15, 17, 18, 23, 24, 25, 26, 38, 40, 42, 43, 45, 55, 57, 59, 61], "wb": [2, 7, 9, 12, 15, 17, 18, 23, 24, 25, 26, 38, 40, 42, 43, 45, 55, 59, 61, 64], "f": [2, 7, 9, 12, 15, 17, 18, 22, 23, 24, 25, 26, 36, 38, 40, 42, 43, 44, 45, 54, 55, 59, 61, 64], "serializetostr": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 29, 30, 31, 33, 34, 35, 36, 37, 38, 39, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 54, 55, 56, 57, 59, 61, 63], "rep": 2, "prepar": 2, "cpu": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 35, 60, 61], "6": [2, 3, 4, 12, 13, 14, 15, 18, 19, 22, 31, 32, 35, 37, 39, 40, 44, 46, 47, 48, 49, 50, 51, 57, 62, 64], "4": [2, 3, 4, 8, 9, 10, 11, 12, 13, 15, 17, 18, 19, 21, 22, 23, 24, 30, 31, 32, 35, 37, 39, 40, 44, 46, 48, 51, 52, 55, 57, 59, 60, 61, 63, 64], 
"label": [2, 7, 11, 13, 14, 15, 17, 19, 32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 45, 46, 50, 56, 57, 59, 64], "proba": 2, "print": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 59, 60, 63, 64, 68], "04301583021879196": 2, "9569841623306274": 2, "devic": 2, "depend": [2, 5, 21, 34, 52, 55, 61, 62, 63, 64, 67], "how": [2, 6, 11, 12, 16, 18, 20, 23, 24, 25, 26, 31, 33, 38, 40, 41, 42, 43, 44, 45, 46, 48, 49, 52, 53, 55, 63, 64, 65, 67, 68], "compil": [2, 6], "gpu": [2, 36, 59, 61], "directli": [2, 39, 64], "other": [2, 12, 15, 18, 22, 23, 28, 35, 38, 47, 57, 58, 59, 60, 61, 62, 63, 64, 68], "easier": [2, 33, 47, 64], "multipl": [2, 6, 12, 15, 39, 44, 62, 63, 64], "version": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 32, 33, 35, 42, 44, 52, 59, 60, 62, 64, 69], "__version__": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 35, 44, 60], "23": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 32, 35, 54, 60], "dev0": [2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 35, 60, 64], "14": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 30, 31, 32, 35, 36, 40, 44, 50, 51, 55, 56, 60, 64], "15": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 32, 35, 36, 37, 40, 41, 60, 64], "total": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 64], "time": [2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 31, 32, 33, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 50, 51, 52, 53, 54, 55, 56, 57, 61, 63, 64], "script": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57], "minut": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57], "112": [2, 16, 35], "plot_backend": [2, 27], "ipynb": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57], "galleri": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 59, 68], "focus": 3, "execut": [3, 27, 58, 62, 64], "pairwis": [3, 62], "distanc": [3, 62, 64], "pprint": [3, 7, 16, 17, 33, 38, 39, 51, 54], "timeit": [3, 4, 5, 11, 34, 35, 44], "timer": 3, "spatial": [3, 64], "tqdm": [3, 30, 44, 55], "panda": [3, 4, 7, 17, 18, 30, 39, 44, 49, 55, 56, 57], "datafram": [3, 4, 7, 17, 18, 28, 30, 44, 49, 55, 56, 57, 58, 60, 61, 66], "rt": [3, 4, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 24, 25, 26, 35, 42, 43, 45, 55, 59, 61, 64], "inferencesess": [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 24, 25, 26, 29, 30, 31, 33, 34, 35, 36, 37, 38, 
39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 54, 55, 56, 57, 59, 61, 63], "algebra": [3, 10, 14, 21, 22, 23, 31, 46, 48, 49, 50, 52, 56, 64], "custom_op": 3, "onnxcdist": 3, "ones": [3, 44, 64], "metric": [3, 12, 26, 64], "euclidean": 3, "op": [3, 12, 13, 14, 19, 31, 46, 47, 48, 49, 50, 52, 56, 63, 64], "12": [3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 29, 32, 34, 35, 37, 38, 40, 42, 43, 44, 45, 51, 54, 55, 64], "z": [3, 22, 36, 51, 54, 64], "onx": [3, 8, 9, 10, 11, 16, 19, 21, 29, 30, 32, 35, 36, 37, 38, 39, 41, 42, 45, 46, 47, 48, 49, 50, 51, 52, 54, 56, 59, 61], "ir_vers": [3, 17, 19, 20, 21, 22, 23, 32], "producer_nam": [3, 17, 19, 20, 21, 22, 23, 32], "producer_vers": [3, 17, 19, 20, 21, 22, 23, 32], "model_vers": [3, 17, 19, 20, 21, 22, 23, 32], "node": [3, 6, 12, 16, 17, 19, 21, 22, 23, 32, 33, 40, 42, 44, 46, 48, 52, 59, 63, 64, 66, 69], "cd_cdist": 3, "com": [3, 7, 12, 13, 14, 17, 25, 40, 46, 48, 51, 54, 56, 64, 68], "microsoft": [3, 59], "tensor_typ": [3, 17, 19, 21, 22, 23, 32], "elem_typ": [3, 17, 19, 21, 22, 23, 32], "dim": [3, 17, 19, 21, 22, 23, 32, 64], "dim_valu": [3, 17, 19, 21, 22, 23, 32, 64], "opset_import": [3, 17, 19, 21, 22, 23, 32, 42], "sess": [3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 24, 25, 26, 29, 30, 31, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 54, 55, 56, 57, 59, 61, 63], "cpuexecutionprovid": [3, 4, 5, 6, 7, 8, 9, 37, 59, 61, 63], "re": [3, 8, 10, 11, 12, 15, 21, 22, 26, 31, 32, 33, 35, 38, 40, 44, 56, 62, 64], "9999999": 3, "measure_tim": [3, 30, 49], "stmt": [3, 5, 34], "context": [3, 29, 30, 33, 49, 50, 64], "repeat": [3, 11, 30, 35, 49, 62, 64], "100": [3, 11, 30, 32, 35, 36, 44, 45, 49, 55, 56, 64], "20": [3, 5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 32, 44, 64], "tim": 3, "global": [3, 4, 5, 34], "dev": 3, "dict": [3, 11, 12, 26, 33, 35, 39, 40, 42, 44, 49, 55, 63, 64], "averag": [3, 12, 30, 35, 46, 47, 48, 49, 50, 64], "deviat": [3, 12, 30, 49, 64], "min_exec": [3, 30, 49], "min": [3, 12, 16, 36, 37, 54, 64], "max_exec": [3, 30, 49], "max": [3, 6, 12, 13, 14, 16, 29, 32, 36, 37, 44, 46, 47, 48, 49, 50, 52, 54, 56, 60, 64], "nrow": 3, "ncol": 3, "time_scipi": 3, "9": [4, 8, 11, 12, 13, 14, 17, 18, 19, 22, 32, 35, 37, 38, 44, 48, 51, 57, 60, 64], "748901000051546e": [], "06": [3, 4, 15, 18, 27, 30, 37, 44, 49, 50, 64], "2147272417589905e": [], "5750000000025465e": [], "05": [3, 6, 13, 14, 15, 16, 27, 29, 35, 36, 38, 44, 64], "364999999344036e": [], "time_ort": 3, "ort": [3, 22, 30, 36], "1592300499955853e": [], "1086234201962248e": [], "3410000000012588e": [], "854999998992752e": [], "longer": [3, 52], "1000": [3, 4, 18, 30, 35, 40, 44, 49, 64], "10000": [3, 4, 5, 6, 30, 34, 36, 64], "new": [3, 10, 13, 14, 16, 21, 24, 25, 28, 31, 32, 33, 47, 51, 53, 55, 56, 58, 59, 60, 64, 67, 68, 69], "random": [3, 9, 19, 24, 25, 30, 42, 43, 44, 45, 46, 48, 55, 56, 63, 64], "randn": [3, 44], "n": [3, 12, 13, 14, 17, 22, 26, 29, 30, 36, 37, 40, 42, 44, 46, 48, 51, 52, 54, 56, 62, 64], "append": [3, 7, 12, 13, 14, 17, 21, 30, 31, 36, 40, 42, 44, 49, 50, 55, 56, 64], "df": [3, 4, 7, 17, 18, 30, 44, 55, 57], "plot": [3, 12, 29, 30, 32, 36, 39, 40, 44, 47], "00": [3, 18, 27, 30, 44, 48, 55, 58], "75": [3, 13, 14, 19, 26, 30, 32, 44, 45, 55, 64], "59it": [], "02": [11, 13, 14, 18, 27, 35, 38, 44, 48, 58], "39it": [], "72it": [], "000005": 55, "000010": 30, "436735": [], "000011": 49, "000012": 3, "927082": [], "000082": [], "000034": 30, "372025": [], "000737": 
[], "000265": [], "776992": [], "507": [], "plot_benchmark_cdist": [3, 27], "up": [4, 17, 26, 33, 52, 62, 63, 64], "reus": [4, 7, 12, 13, 14, 17, 18, 36, 48, 64], "chain": [4, 18], "pca": [4, 12, 18, 48, 62, 64], "There": [4, 7, 16, 17, 18, 21, 23, 26, 32, 33, 37, 47, 51, 52, 53, 61, 64], "becaus": [4, 7, 13, 14, 16, 17, 18, 26, 32, 35, 36, 45, 55, 62, 64], "imput": [4, 7, 17, 18, 63, 64], "part": [4, 7, 12, 17, 18, 26, 33, 52, 62, 64, 65, 66, 69], "comment": [4, 7, 17, 18, 52], "start": [4, 7, 12, 15, 17, 18, 29, 31, 56, 59, 64, 65], "collect_intermediate_step": [4, 18, 37, 63], "compare_object": [4, 63], "onnxconverter_common": [4, 13, 18, 47], "pd": [4, 7, 17, 18, 56, 57], "pipe": [4, 18, 24, 25, 33, 37, 39, 40, 42, 43, 45, 55, 62], "digit": [4, 12, 18], "load_digit": [4, 12, 18], "x_digit": [4, 18], "y_digit": [4, 18], "home": [4, 7, 9, 16, 17, 18, 32, 33, 35, 38, 40, 55], "xadupr": [4, 9, 16, 18, 32, 33, 35, 38, 55], "github": [4, 5, 7, 9, 12, 13, 14, 16, 17, 18, 22, 24, 25, 29, 30, 32, 33, 34, 35, 36, 37, 38, 39, 40, 42, 43, 44, 46, 48, 51, 56, 59, 64, 68], "_logist": [4, 9, 18, 33], "458": [4, 9, 18, 33], "convergencewarn": [4, 9, 16, 18, 33, 38], "lbfg": [4, 7, 9, 17, 18, 33, 63, 64], "converg": [4, 9, 18, 30, 33, 38], "statu": [4, 9, 18, 33], "stop": [4, 9, 18, 33, 64], "NO": [4, 9, 18, 33], "iter": [4, 9, 18, 19, 33, 64], "reach": [4, 9, 18, 33, 38], "limit": [4, 9, 18, 26, 33, 64], "increas": [4, 9, 11, 18, 33, 38, 64], "max_it": [4, 8, 9, 11, 18, 33, 35, 38, 55, 63, 64], "scale": [4, 6, 9, 12, 17, 18, 33, 56, 64], "shown": [4, 7, 9, 18, 33, 64], "http": [4, 7, 9, 12, 13, 14, 17, 18, 25, 29, 33, 40, 46, 48, 51, 54, 56, 64, 68], "org": [4, 5, 7, 9, 13, 14, 17, 18, 24, 29, 30, 33, 34, 36, 37, 39, 40, 42, 43, 44, 51, 64], "stabl": [4, 9, 18, 33, 64], "preprocess": [4, 6, 7, 9, 17, 18, 22, 24, 25, 31, 33, 36, 37, 39, 40, 42, 43, 45, 46, 48, 55, 56, 63, 64], "html": [4, 5, 7, 9, 13, 14, 17, 18, 24, 29, 30, 33, 34, 36, 37, 39, 42, 43, 44, 51, 64], "pleas": [4, 5, 7, 9, 13, 14, 15, 17, 18, 24, 29, 30, 33, 34, 36, 37, 39, 42, 43, 44, 51, 64], "refer": [4, 9, 18, 33, 52, 64], "document": [4, 9, 12, 16, 18, 22, 26, 29, 33, 51, 63], "solver": [4, 7, 9, 13, 14, 17, 18, 33, 38, 41, 63, 64], "option": [4, 5, 6, 7, 8, 9, 11, 14, 15, 16, 17, 18, 24, 25, 26, 28, 30, 34, 36, 37, 38, 39, 41, 42, 43, 44, 45, 50, 52, 55, 58, 59, 60, 63, 64, 66, 68], "n_iter_i": [4, 9, 18, 33], "_check_optimize_result": [4, 9, 18, 33], "x27": [4, 7, 13, 14, 17, 18, 24, 29, 30, 36, 37, 39, 42, 43, 51], "environ": [4, 5, 7, 13, 14, 17, 18, 24, 29, 30, 34, 36, 37, 39, 42, 43, 44, 51], "rerun": [4, 5, 7, 13, 14, 17, 18, 24, 29, 30, 34, 36, 37, 39, 42, 43, 44, 51], "cell": [4, 5, 7, 13, 14, 17, 18, 24, 29, 30, 34, 36, 37, 39, 42, 43, 44, 51, 64], "show": [4, 5, 7, 12, 13, 14, 17, 18, 24, 29, 30, 32, 34, 36, 37, 38, 39, 40, 41, 42, 43, 44, 48, 50, 51, 52, 53, 55, 63, 64, 65, 67, 68], "represent": [4, 5, 7, 13, 14, 17, 18, 24, 29, 30, 34, 36, 37, 39, 42, 43, 44, 51, 64], "trust": [4, 5, 7, 13, 14, 17, 18, 24, 29, 30, 34, 36, 37, 39, 42, 43, 44, 51], "On": [4, 5, 7, 13, 14, 17, 18, 24, 29, 30, 34, 36, 37, 39, 42, 43, 44, 51], "unabl": [4, 5, 7, 13, 14, 17, 18, 24, 25, 29, 30, 34, 36, 37, 39, 40, 42, 43, 44, 46, 48, 51, 56, 68], "render": [4, 5, 7, 13, 14, 17, 18, 24, 29, 30, 34, 36, 37, 39, 42, 43, 44, 51, 54], "page": [4, 5, 7, 13, 14, 17, 18, 24, 29, 30, 34, 36, 37, 39, 42, 43, 44, 51], "nbviewer": [4, 5, 7, 13, 14, 17, 18, 24, 29, 30, 34, 36, 37, 39, 42, 43, 44, 51], "pipelinepipelin": [4, 7, 17, 18, 24, 29, 36, 37, 39, 
42, 43], "pcapca": [4, 18], "logisticregressionlogisticregress": [4, 7, 13, 14, 17, 18], "skl": [4, 8, 18, 30, 31, 36], "predict_proba": [4, 7, 8, 13, 14, 17, 18, 24, 25, 26, 37, 39, 42, 43, 45, 55, 56], "onx_pr": [4, 18], "99998536e": [4, 18], "01": [3, 4, 8, 11, 13, 14, 18, 27, 30, 35, 38, 48, 54, 58, 63, 64], "99063158e": [4, 18], "19": [4, 18, 22, 32, 44, 64], "48548953e": [4, 18], "55765726e": [4, 18], "08": [4, 8, 18, 36, 44, 54, 64], "32559745e": [4, 18], "21314653e": [4, 18], "98959930e": [4, 18], "22513839e": [4, 18], "07": [3, 4, 11, 15, 18, 29, 30, 35, 37, 44, 46, 47, 48, 49, 50, 56, 64], "23871272e": [4, 18], "98148509e": [4, 18], "47648437e": [4, 18], "99999301e": [4, 18], "05811967e": [4, 18], "49298733e": [4, 18], "13": [4, 18, 22, 32, 44, 51, 54, 56, 64], "48627417e": [4, 18], "75686484e": [4, 18], "39025135e": [4, 18], "11": [3, 4, 17, 18, 22, 24, 25, 32, 33, 44, 51, 60, 64], "95899938e": [4, 18], "50528833e": [4, 18], "30607478e": [4, 18], "99998569e": [4, 18], "99062501e": [4, 18], "48550355e": [4, 18], "55766493e": [4, 18], "32561811e": [4, 18], "21315134e": [4, 18], "98961930e": [4, 18], "22514706e": [4, 18], "23872494e": [4, 18], "98151529e": [4, 18], "47648956e": [4, 18], "99999285e": [4, 18], "05811991e": [4, 18], "49297488e": [4, 18], "48627885e": [4, 18], "75685548e": [4, 18], "39024415e": [4, 18], "95899520e": [4, 18], "50529058e": [4, 18], "30607344e": [4, 18], "No": [4, 64], "369610077999994": [], "31605130999997755": [], "imagin": [4, 18, 64], "wrong": [4, 18, 29, 63], "steal": [4, 18], "smaller": [4, 18, 46, 47, 48, 62, 64], "assert": [4, 18, 46], "len": [4, 13, 14, 16, 18, 26, 31, 32, 33, 40, 42, 56, 60, 64], "onnx_step": [4, 18, 37, 63], "onnx_output": [4, 18, 37, 63], "skl_output": [4, 18, 37, 63], "_debug": [4, 18, 37, 63], "transform": [4, 7, 10, 13, 14, 17, 18, 21, 25, 26, 31, 37, 39, 40, 46, 47, 48, 49, 51, 54, 55, 56, 57, 63, 64, 68], "__class__": [4, 31, 46, 47, 48, 49, 50, 52, 56], "els": [4, 7, 12, 13, 14, 17, 31, 36, 40, 44, 49, 54, 55, 64], "_pca": [4, 18], "595577918999993": [], "12279540399998723": [], "0600300350000111": [], "22988680800000338": [], "763": [], "plot_benchmark_pipelin": [4, 27], "dedic": [5, 12, 22, 62], "miss": [5, 7, 17, 35, 55, 64, 68], "behaviour": [5, 8, 11, 26, 34, 56, 62], "o": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 32, 33, 38, 40, 41, 57, 63, 64], "matplotlib": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 32, 36, 40, 44, 57], "pyplot": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 32, 36, 40, 44, 57], "plt": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 32, 36, 40, 44, 57], "net_draw": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57, 63], "getpydotgraph": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57, 63], "getopnodeproduc": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57, 63], "mixtur": [5, 34, 64], "model_select": [5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 19, 29, 30, 33, 34, 35, 36, 38, 41, 45, 59, 61, 64], "train_test_split": [5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 19, 29, 30, 33, 34, 35, 36, 38, 41, 45, 59, 61], "x_train": [5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 19, 29, 30, 33, 34, 35, 36, 38, 40, 41, 45, 59, 61], "x_test": [5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 19, 29, 30, 33, 34, 35, 36, 38, 41, 45, 59, 61], "gaussianmixturegaussianmixtur": [5, 34], "score_sampl": [5, 33, 34], "xt": [5, 34, 64], "53209789": [], "29526047": [], "38524978": [], "56312392": [], "39794725": [], "5320976": [], "2952611": [], "38525": [], "5631275": [], "397947": [], 
"pydot_graph": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57, 63], "rankdir": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57, 63], "tb": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57], "node_produc": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57, 63], "docstr": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57, 63], "color": [5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 57], "yellow": [5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 40, 57], "fillcolor": [5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 57], "style": [5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 57, 64], "write_dot": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57, 63], "dot": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 39, 57, 63], "system": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57, 63, 64], "gdpi": [5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 57], "300": [5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 44, 56, 57, 64], "tpng": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57, 63], "imag": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57, 64], "imread": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57], "png": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57], "fig": [5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 32, 36, 40, 57], "ax": [5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 29, 32, 33, 34, 36, 37, 39, 40, 42, 43, 44, 45, 46, 48, 49, 50, 52, 57, 64], "subplot": [5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 32, 36, 40, 44, 57], "figsiz": [5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 36, 40, 57], "40": [5, 6, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 32, 39, 44], "imshow": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57], "axi": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 32, 39, 46, 47, 49, 50, 52, 55, 56, 57, 64], "off": [5, 6, 7, 10, 12, 13, 14, 17, 21, 22, 23, 24, 25, 26, 57], "5287": 5, "8425": 5, "tell": [5, 13, 14, 16, 17, 19, 25, 26, 33, 34, 40, 45, 46, 48, 52, 68], "model_onnx2": [5, 34], "sess2": [5, 6, 8, 11, 34, 35, 36, 39, 49], "mixture2": 5, "4921": 5, "13264": 5, "2420818140000165": [], "3835559209999815": [], "much": [5, 11, 16, 30, 34, 35, 44, 55, 64], "faster": [5, 11, 26, 30, 34, 35], "mani": [5, 11, 15, 16, 24, 25, 28, 29, 34, 35, 36, 37, 42, 43, 45, 48, 58, 60, 61, 63, 64, 66, 67], "whitelist": [5, 34], "runtimeerror": [5, 10, 13, 14, 15, 16, 31, 32, 34, 40, 42, 51], "noqa": [5, 6, 10, 22, 24, 25, 31, 42, 43, 44, 51, 57], "18": [13, 22, 30, 32, 59, 64], "314": 64, "plot_black_op": [5, 27], "veri": [6, 9, 16, 17, 19, 46, 47, 50, 56], "basic": [6, 9, 12, 16, 19, 22, 47, 64], "Will": [6, 12], "optim": [6, 16, 30, 33, 52, 61, 62, 63, 64], "divis": [6, 64], "even": [6, 11, 13, 14, 16, 25, 26, 35, 36, 44, 46, 48, 56, 63, 64, 68], "small": [6, 7, 33, 36, 64], "introduc": [6, 29, 36, 44, 55, 63], "next": [6, 16, 36, 38], "step": [6, 7, 8, 9, 12, 17, 24, 29, 33, 36, 39, 40, 42, 43, 54, 55, 62, 63, 64], "decis": [6, 33, 36], "tree": [6, 19, 36, 44, 64], "One": [6, 17, 28, 46, 48, 58, 60, 63, 64, 66], "path": [6, 33, 40], "solv": [6, 52, 53], "issu": [6, 13, 14, 25, 28, 37, 44, 45, 46, 48, 56, 58, 59, 60, 62, 66, 68, 69, 70], "typic": [6, 64], "assumpt": [6, 36], "math": 6, "make_regress": [6, 30, 36], "decisiontreeregressor": [6, 33, 36, 64], "sklapi": [6, 31, 36, 40, 51, 55, 57], "casttransform": [6, 55, 64], "weird": 6, "random_st": [6, 12, 26, 29, 30, 33, 45, 56, 64], "y_train": [6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 19, 29, 30, 33, 35, 36, 38, 41, 45, 55, 59, 61], "_": [6, 26, 33, 36, 
40, 41, 44, 45, 54, 55, 60], "xi_train": [6, 36], "yi_train": [6, 36], "copi": [6, 11, 24, 25, 33, 35, 36, 37, 38, 42, 43, 45, 47, 55, 64], "xi_test": [6, 36], "pi": [6, 64], "int64": [6, 7, 13, 14, 15, 17, 18, 21, 31, 35, 36, 37, 38, 41, 52, 56, 64], "max_depth": [6, 33, 36, 55, 64], "model1": [6, 56], "scaler": [6, 7, 10, 12, 17, 24, 25, 33, 36, 37, 42, 43, 45, 56, 63, 64], "dt": [6, 36], "exp1": 6, "onx1": [6, 57], "sess1": 6, "And": [6, 7, 12, 13, 14, 16, 17, 19, 24, 25, 26, 37, 39, 40, 42, 43, 45, 50, 55, 56, 57, 64], "maximum": [6, 44, 59, 64], "got1": 6, "maxdiff": 6, "a1": 6, "a2": 6, "d": [6, 29, 31, 32, 36, 39, 40, 42, 44, 46, 47, 48, 49, 50, 52, 54, 64], "ab": [6, 16, 29, 36, 37, 44, 46, 47, 48, 49, 50, 52, 54, 55, 56, 64], "ravel": [6, 12, 29, 36, 37, 44, 45, 46, 47, 48, 49, 50, 52, 55, 56, 57], "md1": 6, "322": [6, 33, 58], "39065126389346": 6, "cast1": 6, "2536": 6, "1707": [6, 22], "fix": [6, 15, 16, 51, 64], "replac": [6, 15, 16, 33, 49, 62, 64], "happen": [6, 12, 15, 32, 36, 37, 46, 47, 48, 49, 50, 56, 64, 69], "By": [6, 8, 11, 25, 33, 35, 36, 45, 46, 50, 51, 62, 64], "solut": [6, 38], "div": [6, 32, 33, 36, 64], "model2": [6, 36], "cast64": [6, 36], "float64": [6, 7, 15, 16, 17, 21, 31, 36, 46, 47, 48, 64], "exp2": 6, "onx2": [6, 8, 11, 35, 36, 39, 49, 57], "div_cast": [6, 33, 36], "got2": [6, 39, 49], "md2": 6, "9884569016758178e": 6, "cast2": 6, "4171": 6, "246": [], "plot_cast_transform": [6, 27], "ship": 7, "where": [7, 11, 12, 21, 32, 35, 36, 39, 44, 61, 64, 66], "still": [7, 29, 33, 36, 64, 68], "mix": [7, 17, 40, 63], "assert_almost_equ": [7, 46, 48, 51, 52, 63], "compos": [7, 17, 26, 39, 54, 55, 61, 63, 64], "simpleimput": [7, 17, 63, 64], "titanic_url": [7, 17], "githubusercont": [7, 17], "amuel": [7, 17], "2017": [7, 17, 59], "091d371": [7, 17], "titanic3": [7, 17], "csv": [7, 17], "read_csv": [7, 17], "drop": [7, 17, 39, 55, 64], "surviv": [7, 17], "beforehand": [7, 17], "cat": [7, 17, 31, 39, 63], "embark": [7, 17], "sex": [7, 17], "pclass": [7, 17], "fillna": [7, 17], "inplac": [7, 17, 64], "y_test": [7, 8, 9, 11, 12, 13, 14, 16, 17, 19, 29, 30, 35, 36, 38, 41, 59, 61], "test_siz": [7, 17, 45], "numeric_featur": [7, 17, 63], "ag": [7, 17], "fare": [7, 17], "numeric_transform": [7, 17, 63], "strategi": [7, 17, 63, 64], "median": [7, 17, 63], "categorical_featur": [7, 17, 63, 64], "categorical_transform": [7, 17, 39, 63], "constant": [7, 16, 17, 22, 38, 46, 47, 56, 64], "fill_valu": [7, 17, 64], "onehot": [7, 17, 31, 39, 57, 63, 64], "handle_unknown": [7, 17, 39, 63, 64], "ignor": [7, 17, 35, 39, 44, 55, 62, 63, 64], "preprocessor": [7, 17, 39, 63, 64], "num": [7, 17, 63], "clf": [7, 17], "sibsp": [7, 17], "parch": [7, 17], "ticket": [7, 17], "cabin": [7, 17], "boat": [7, 17], "bodi": [7, 17, 26, 64], "dest": [7, 17], "columntransformercolumntransform": [7, 17, 39], "simpleimputersimpleimput": [7, 17], "standardscalerstandardscal": [7, 17, 24, 36, 37, 42, 43], "onehotencoderonehotencod": [7, 17, 39], "know": [7, 13, 14, 16, 17, 24, 25, 34, 42, 43, 45, 46, 47, 48, 51, 52, 63], "simpli": [7, 17, 64], "definit": [7, 12, 17, 22, 64], "convert_dataframe_schema": [7, 17], "k": [7, 12, 17, 21, 32, 33, 36, 44, 52, 54, 64], "v": [7, 12, 17, 33, 46, 47, 48, 49, 50, 52, 64], "continu": [7, 12, 17, 31, 32, 33, 36, 53, 55, 63, 64], "t": [7, 17, 21, 26, 35, 36, 46, 47, 48, 49, 50, 52, 63, 64], "elif": [7, 12, 17, 55], "initial_input": 7, "merg": [7, 17, 36], "singl": [7, 15, 16, 17, 26, 29, 30, 33, 42, 44, 62, 64, 68], "effici": [7, 17, 33, 50, 52, 64], "done": [7, 16, 17, 
30, 33, 36, 38, 56, 62, 63, 64], "befor": [7, 10, 12, 17, 23, 29, 36, 40, 52, 63, 64, 65], "pipeline_titan": [7, 17], "more": [7, 16, 17, 21, 37, 42, 44, 47, 51, 52, 53, 54, 55, 60, 61, 64], "to_drop": [7, 17], "implicit": [7, 17, 63, 64], "appli": [7, 12, 17, 21, 26, 33, 52, 55, 62, 64], "ensur": [7, 17, 64], "7930564": [], "2069436": [], "90833415": [], "09166585": [], "accept": [7, 17, 64], "detail": [7, 17, 19, 39, 61, 64], "describ": [7, 11, 17, 21, 22, 32, 35, 51, 61, 64], "realli": [7, 17, 33], "matrix": [7, 8, 11, 12, 16, 17, 21, 26, 35, 39, 40, 46, 47, 48, 49, 50, 52, 55, 61, 64], "explain": [7, 17, 30, 36, 44, 64], "line": [7, 16, 17, 26, 32, 51, 52, 54, 62, 63], "reshap": [7, 10, 17, 22, 32, 39, 40, 51, 54, 56, 57, 64], "x_test2": [7, 17], "readi": [7, 17, 26], "pred_onx": [7, 9, 12, 16, 17, 19, 24, 25, 26, 42, 43, 45, 55, 59, 61], "7930564284324646": [], "2069435715675354": [], "9083341360092163": [], "0916658341884613": [], "swith": 7, "again": [7, 13, 14, 16, 40], "addit": [7, 16, 44, 52, 63, 64], "pipeline_titanic_nozipmap": 7, "20694357": [], "90833414": [], "09166583": [], "6901": 7, "6049": 7, "357": 40, "plot_complex_pipelin": [7, 27], "iri": [8, 9, 11, 15, 19, 31, 33, 35, 41, 54, 55, 59, 61], "clr": [8, 9, 11, 15, 19, 33, 35, 41, 59, 61], "500": [8, 11, 26, 35, 44, 56, 64], "confirm": [8, 11, 16], "02921719": [], "95279049": [], "01799232": [], "029217161238193512": [], "9527904391288757": [], "017992328852415085": [], "0007321650045923889": [], "5830549597740173": [], "41621288657188416": [], "res2": [8, 11, 35], "99994178": [], "48469604": [], "48475425": [], "9999427795410156": [], "4846959114074707": [], "4847536087036133": [], "340991973876953": [], "339038610458374": [], "0019540786743164": [], "052": [], "plot_convert_decision_funct": [8, 27], "deploi": [9, 20, 28, 30, 32, 58, 60, 66], "involv": [9, 16, 52, 60, 63, 64], "three": [9, 13, 14, 15, 63, 64], "forest": [9, 19, 44, 63], "ensembl": [9, 29, 30, 32, 33, 39, 59, 64], "randomforestclassifi": [9, 33, 39, 55, 59, 64], "rf_iri": [9, 59], "get_input": [9, 10, 15, 19, 22, 40, 41, 45, 59, 61], "label_nam": [9, 19, 45, 59, 61], "get_output": [9, 11, 15, 19, 22, 33, 35, 38, 41, 45, 59, 61], "261": [], "plot_convert_model": [9, 27], "leverag": [10, 48], "easi": [10, 17, 40, 60, 61, 63, 64], "fine": [10, 64], "baseestim": [10, 12, 13, 14, 26, 46, 47, 48, 49, 50, 52], "transformermixin": [10, 12, 26, 46, 47, 48, 49, 50, 52], "cluster": [10, 37, 64], "make_pipelin": [10, 55], "wrap_as_onnx_mixin": 10, "onnx_op": [10, 13, 14, 21, 22, 23, 31, 46, 47, 49, 50, 52, 56, 64], "onnxsub": [10, 46, 49, 50, 52], "onnxdiv": [10, 52], "onnx_operator_mixin": 10, "predict_with_onnxruntim": [10, 22], "arang": [10, 22, 24, 25, 42, 43, 45, 55, 57], "tr": [10, 54], "n_cluster": [10, 37, 64], "anymor": [10, 16, 36, 52], "third": [10, 13, 14, 35, 64], "wrap": [10, 42, 64], "tr_mixin": 10, "fourth": [10, 35, 64], "customoptransform": 10, "__init__": [10, 12, 13, 14, 46, 47, 48, 49, 50, 52], "self": [10, 12, 13, 14, 26, 39, 46, 47, 48, 49, 50, 52, 64], "w_": 10, "s_": 10, "std": [10, 12, 33, 37], "onnx_shape_calcul": [10, 64], "to_onnx_oper": [10, 64], "kwarg": [10, 12, 52, 55, 64], "contain": [10, 12, 13, 14, 18, 19, 20, 25, 31, 32, 33, 36, 42, 44, 46, 47, 48, 49, 50, 51, 52, 53, 56, 57, 61, 62, 63, 64, 68], "least": [10, 16, 32, 64], "opv": [10, 14, 31, 46, 48, 49, 50, 52, 56], "i0": [10, 11], "w": [10, 21, 31, 62, 64], "pipeline_onnx_mixin": 10, "3103": 10, "6900": 10, "696": [], "plot_convert_syntax": [10, 27], "probabili": [11, 33, 35], 
"mechan": [11, 16, 35, 61, 62, 63, 64, 67], "retain": [11, 35, 64], "alwai": [11, 36, 61, 62, 63, 64], "deactiv": [11, 62], "observ": [11, 12, 13, 14, 30, 35, 44, 46, 47], "987332284450531": [], "012667577713727951": [], "131611733437239e": [], "013057885691523552": [], "9642565846443176": [], "022685566917061806": [], "8733228e": [], "2667578e": [], "1316117e": [], "3057886e": [], "6425658e": [], "2685567e": [], "ndarrai": [11, 35], "split": [11, 26, 33, 35, 51, 62, 64], "onx3": [11, 35, 36], "sess3": [11, 35, 36], "res3": [11, 35], "out": [11, 17, 19, 26, 35, 40, 46, 47, 48, 49, 50, 52, 54, 56, 64], "output_label": [11, 17, 19, 33, 35], "38": [11, 32, 35, 64], "9873323": [], "01305789": [], "i1": [11, 57], "01266758": [], "9642566": [], "i2": [11, 57], "x32": [11, 13, 14, 36, 44, 49], "lambda": [11, 35, 42, 44], "result": [11, 12, 13, 14, 16, 28, 31, 33, 35, 36, 38, 50, 51, 52, 54, 57, 58, 60, 62, 63, 64, 66, 68], "0049615009999683934": [], "0032671009999489797": [], "003343400999995083": [], "004399800999976833": [], "0052236020000009376": [], "00505200100002412": [], "005118002000017441": [], "004427301000021089": [], "0031935009999983777": [], "003232501000013599": [], "0014022999999951935": [], "0014034010000045782": [], "0014318000000344": [], "00137750099997902": [], "0014054000000101041": [], "0014666010000041751": [], "0014159000000404376": [], "0013950000000022555": [], "0014350010000043767": [], "0013821000000007189": [], "002472800999953506": [], "0024044009999784066": [], "002418999999974858": [], "002419600999985505": [], "0024915010000086113": [], "0024198009999736314": [], "0024332000000413245": [], "0023712009999599104": [], "00237140100000488": [], "0023815999999783344": [], "143": [], "plot_convert_zipmap": [11, 27], "might": [12, 15, 33, 46, 47, 56, 63], "obvious": [12, 52], "interest": [12, 64], "studi": 12, "reproduc": 12, "onc": [12, 16, 38, 40, 63, 64, 69], "tsne": 12, "fit_transform": [12, 21, 26], "propos": [12, 14, 40, 54, 62, 64], "approxim": [12, 16, 21, 64], "about": [12, 30, 46, 47, 48, 56, 61, 62, 64], "quit": [12, 16, 22, 40], "process": [12, 19, 35, 41, 46, 47, 49, 55, 63, 64], "rightarrow": 12, "x_2": [12, 64], "mathbb": 12, "r": [12, 15, 31, 32, 33, 40, 41, 51, 64], "nearest": [12, 64], "neightbour": 12, "x_3": 12, "normal": [12, 17, 36, 41, 64], "x_4": 12, "_3": 12, "_4": 12, "inspect": [12, 13, 14], "offsetbox": 12, "clone": [12, 13, 14], "manifold": 12, "mean_squared_error": 12, "neighbor": [12, 64], "kneighborsregressor": [12, 33, 64], "get_model_alia": [12, 13, 14, 31, 50, 56], "_registr": [12, 13, 33], "get_shape_calcul": [12, 13], "predictabletsn": 12, "estim": [12, 13, 14, 21, 29, 30, 35, 52, 64], "keep_tsne_output": 12, "mlpregressor": [12, 64], "center": [12, 64], "he": [12, 21], "keep": [12, 20, 33, 36, 37, 38, 55, 63, 64], "store": [12, 16, 23, 28, 33, 36, 51, 58, 60, 61, 63, 64, 66], "member": [12, 52], "tsne_outputs_": 12, "sent": [12, 40], "meth": 12, "set_param": 12, "mlinsight": [12, 39, 40], "mlmodel": 12, "tsne_transform": 12, "understand": [12, 47, 64], "hasattr": [12, 54], "attributeerror": [12, 63], "sample_weight": [12, 13, 14, 46, 47, 48, 49, 50, 52], "spars": [12, 28, 39, 58, 60, 63, 64, 70], "n_sampl": [12, 32, 45], "n_featur": [12, 30, 32, 45, 64], "n_target": [12, 32], "individu": [12, 29, 64], "weight": [12, 26, 31, 57, 64], "sampl": [12, 29, 32, 64], "transformer_": 12, "estimator_": [12, 13, 14], "mean_": [12, 46, 47, 48, 49, 50, 52], "dimens": [12, 13, 14, 15, 37, 40, 56, 61, 64], "inv_std_": 12, "invers": [12, 46, 47, 
48, 49, 50, 52, 64], "standard": [12, 19, 32, 63, 64], "loss_": 12, "loss": [12, 64], "sig": [12, 13, 14], "par": 12, "p": [12, 41, 46, 47, 49, 50, 52, 64], "var": [12, 19, 54, 64], "exp": [12, 21, 46, 47, 48, 49, 50, 52, 64], "got": [12, 15, 21, 39, 44, 46, 47, 48, 49, 51, 52, 54], "tranform": 12, "pred": [12, 13, 14, 21, 46, 47, 48, 49, 50, 52], "get_param": [12, 62], "deep": [12, 36, 37, 40, 64], "embed": 12, "t_": [12, 44], "e_": 12, "pt": 12, "pe": 12, "pn": 12, "startswith": [12, 26, 42], "n_": 12, "valueerror": [12, 54, 56], "unexpect": [12, 15, 31, 70], "n_class": [12, 45], "xd": [12, 36], "yd": 12, "img": [12, 40], "imgs_train": 12, "imgs_test": 12, "n_compon": [12, 21, 26, 63, 64], "init": [12, 29, 33, 39, 50, 64], "plot_embed": 12, "xp": 12, "titl": [12, 30, 44], "x_min": 12, "x_max": 12, "text": [12, 26, 40, 51, 54, 55], "str": [12, 16, 17, 39, 63], "cm": 12, "set1": 12, "fontdict": 12, "bold": 12, "annotationbbox": 12, "thumbnail": 12, "shown_imag": 12, "just": [12, 21, 33, 50, 52, 64], "someth": 12, "big": [12, 22, 30, 52], "dist": 12, "sum": [12, 32, 35, 44, 54, 55, 64], "4e": 12, "don": 12, "too": [12, 44, 45, 52, 64, 67], "close": [12, 16, 57], "r_": 12, "imagebox": 12, "offsetimag": 12, "cmap": 12, "gray_r": 12, "add_artist": 12, "set_xtick": 12, "set_ytick": 12, "set_titl": [12, 32, 36, 40], "x_train_tsn": 12, "axessubplot": [12, 40], "ptsne_knn": 12, "x_train_tsne2": 12, "nstandardscal": 12, "x_test_tsne2": 12, "actual": [12, 15, 17, 26, 46, 60, 64], "predictable_tsne_shape_calcul": 12, "mmust": [12, 13, 14], "_y": 12, "Then": [12, 13, 14, 17, 23, 24, 25, 33, 37, 56, 61, 64], "predictable_tsne_convert": 12, "scope": [12, 13, 14, 31, 42, 44, 46, 47, 48, 49, 50, 52, 56, 62, 63, 64], "space": [12, 26, 51, 64], "neighbour": 12, "local": [12, 13, 55, 64, 68], "knn_op": 12, "declare_local_oper": [12, 13, 14, 31, 42, 50, 56], "knn_output": 12, "declare_local_vari": [12, 13, 14, 31, 42, 50, 56], "adjust": [12, 13, 64], "submodel": [12, 13], "shape_calc": [12, 13], "blob": [12, 64], "master": [12, 64], "doc": [12, 64], "md": [12, 22, 64], "offset": [12, 17, 40, 64], "connect": [12, 64], "full_nam": [12, 13, 14, 42, 47], "custompredictabletsn": 12, "predictable_tsn": 12, "42004508": [], "7468902": [], "8616306": [], "7184533": [], "row": [12, 21, 46, 48, 61, 63, 64], "pipeline_tsn": 12, "2643": 12, "9099": 12, "537": [], "plot_custom_model": [12, 27], "goal": [13, 14], "abov": [13, 14, 21, 56, 64], "threshold": [13, 14, 36, 56, 64], "valid": [13, 14, 37, 64], "mention": [13, 14], "classifiermixin": [13, 14], "apply_ident": 13, "apply_cast": 13, "apply_great": 13, "proto": [13, 14, 22, 56], "onnx_proto": [13, 14, 56], "validatorclassifi": [13, 14], "liblinear": [13, 14, 41], "mx": [13, 14, 60], "validatorclassifiervalidatorclassifi": [13, 14], "measur": [13, 14, 64], "__main__": [13, 14, 46, 48], "predictor": [13, 14, 21, 25, 46, 48, 55, 56, 63, 68], "regist": [13, 14, 21, 31, 33, 46, 48, 51, 54, 55, 56, 57, 63, 67, 68], "yet": [13, 14, 25, 32, 42, 46, 48, 55, 56, 63, 64, 68], "contribut": [13, 14, 25, 46, 48, 56, 64, 68], "project": [13, 14, 25, 40, 46, 48, 56, 68], "piec": [13, 14, 51, 63, 66], "validator_classifier_shape_calcul": [13, 14], "input0": [13, 14], "classes_": [13, 14], "validator_classifier_convert": [13, 14], "val_op": 13, "val_label": [13, 14], "val_prob": [13, 14], "val_max": 13, "axis_nam": 13, "reducemax": [13, 64], "keepdim": [13, 14, 46, 47, 49, 50, 52, 64], "th_name": 13, "val_bin": 13, "val_val": [13, 14], "link": [13, 64], "share": [13, 39, 64], "registr": 
[13, 14, 49, 50, 56, 68], "customvalidatorclassifi": [13, 14], "behav": [13, 14, 64], "validator_classifier_pars": [13, 14], "this_oper": [13, 14, 31, 42, 50, 56], "end": [13, 14, 19, 30, 33, 50, 56, 62, 64], "21692188e": [], "78187646e": [], "20165825e": [], "04": [13, 14, 18, 27, 35, 44], "55331477e": [], "42879882e": [], "11586970e": [], "06729619e": [], "03": [8, 13, 18, 27, 30, 48, 54, 58], "66865423e": [], "32067281e": [], "71927334e": [], "57710245e": [], "03624213e": [], "90277158e": [], "09595488e": [], "27354510e": [], "21692228e": [], "78187609e": [], "20188575e": [], "55331355e": [], "42879808e": [], "11586997e": [], "06731057e": [], "66865396e": [], "32067323e": [], "71927327e": [], "57710135e": [], "03624710e": [], "90277326e": [], "09595485e": [], "27301522e": [], "good": [13, 14, 16, 22, 29], "validator_classifi": [13, 14], "3293": [], "4934": [13, 14], "623": [13, 27], "plot_custom_pars": [13, 27], "rewrit": [14, 21], "syntax": [14, 62, 64], "onnxgreat": 14, "onnxcast": [14, 31, 52, 56], "onnxreducemaxapi18": 14, "onnxident": [14, 56], "onnx_oper": [14, 31, 48, 56], "onnxsubestim": [14, 31, 48, 56], "rmax": 14, "great": [14, 40], "r1": 14, "r2": 14, "r3": 14, "add_to": [14, 31, 46, 48, 49, 50, 52, 56], "76059935e": [], "03779519e": [], "95844421e": [], "75056515e": [], "94360651e": [], "05164292e": [], "21298446e": [], "34560126e": [], "64226889e": [], "42712466e": [], "02185100e": [], "53543653e": [], "59571659e": [], "40344924e": [], "34166675e": [], "7607807e": [], "0377954e": [], "9584441e": [], "7510848e": [], "9436064e": [], "0516422e": [], "2129603e": [], "3456013e": [], "6422689e": [], "4271223e": [], "0218512e": [], "5354371e": [], "5957164e": [], "4034490e": [], "3380335e": [], "3414": 14, "808": [], "plot_custom_parser_altern": [14, 27], "mistak": [15, 63], "sever": [15, 35, 55, 64], "situat": [15, 36, 64], "instead": [15, 26, 29, 35, 39, 51, 55, 64], "among": [15, 59, 64], "capi": [15, 24, 39, 40], "onnxruntime_pybind11_st": [15, 24, 39, 40], "invalidargu": [15, 40], "importerror": [15, 26, 55, 56], "example2": 15, "due": [15, 17, 37, 40, 63], "bad": 15, "kind": [15, 33, 63, 64], "onnxruntimeerror": 15, "invalid_argu": 15, "misspel": 15, "invalid": [15, 64], "9999734163284302": 15, "656836477399338e": 15, "484377840758725e": 15, "09": [15, 38, 64], "9999914169311523": 15, "446793799521402e": 15, "7366836857490853e": 15, "9999918341636658": 15, "6854097541217925e": 15, "499288818100467e": 15, "goe": [15, 37, 60], "feed": [15, 64], "necessarili": [15, 22, 64], "rank": [15, 21, 64], "either": [15, 38, 64], "index": [15, 31, 51, 64], "higher": [15, 32, 54, 64], "than": [15, 17, 23, 30, 32, 37, 42, 52, 61, 64, 65], "warn": [15, 16, 32, 35, 38, 44, 55, 64], "077": [15, 20], "plot_errors_onnxruntim": [15, 27], "gaussianprocessregressor": [16, 33, 64], "precis": [16, 26, 64], "better": [16, 22, 33, 35, 36, 37, 44, 55, 64], "boston": 16, "load_diabet": [16, 29, 45], "gaussian_process": [16, 64], "kernel": [16, 64], "dotproduct": 16, "rbf": [16, 64], "doubletensortyp": [16, 39], "gpr": 16, "alpha": [16, 40, 46, 47, 48, 49, 50, 52, 64], "430": 16, "k1__sigma_0": 16, "upper": [16, 36, 41, 64], "bound": [16, 36, 64], "100000": [16, 40], "420": 16, "k2__length_scal": 16, "1e": [16, 46, 52, 63, 64], "decreas": [16, 64], "sigma_0": 16, "length_scal": 16, "unfortun": [16, 17, 36], "though": [16, 64], "went": [16, 29], "previou": [16, 32, 52, 55, 61, 62, 63, 64], "snippet": [16, 38, 63, 64], "impos": [16, 64], "therefor": [16, 36, 64], "pred_skl": [16, 29], "130": [39, 58, 64], 
"42456055": [], "187": [], "53356934": [], "108": [], "73681641": [], "160": 44, "21350098": [], "149": [], "2623291": [], "177": [], "39453125": [], "194": 40, "66821289": [], "205": 40, "04931641": [], "29125977": [], "106": [], "82971191": [], "1048576": 64, "seem": [16, 30, 33, 35, 52], "biggest": [16, 29], "diff": [16, 29, 36, 37, 46, 47, 48, 49, 50, 52, 54, 55], "sort": [16, 33, 40, 60, 64], "squeez": [16, 64], "1048777": [], "05126953": [], "36779785": [], "1048778": [], "23291016": [], "1048781": [], "1048795": [], "93054199": [], "321": [46, 58], "coupl": [16, 34, 54, 64], "matric": [16, 21, 28, 35, 46, 47, 48, 49, 50, 52, 54, 58, 60, 64, 70], "coeffici": [16, 17, 36, 46, 64], "order": [16, 36, 41, 62, 63, 64], "magnitud": [16, 36, 64], "difficult": [16, 22, 26, 36, 52, 67, 69], "made": [16, 57, 64], "stick": 16, "real": [16, 64], "dump": [16, 18, 46, 48, 52, 63], "onx64": 16, "sess64": 16, "pred_onx64": 16, "41578217": [], "00647203": [], "00719334": [], "00802596": [], "00877837": [], "01063271": [], "As": [16, 63, 64], "almost": [16, 26, 29, 62, 64], "bigger": [16, 64], "size32": 16, "size64": 16, "29814": 16, "57694": 16, "reflect": [16, 64], "onx64_std": 16, "highlight": [16, 64], "fact": [16, 46], "intern": 16, "sess64_std": 16, "pred_onx64_std": 16, "53861795": [], "73773253": [], "21498564": [], "26102725": [], "627": [], "20943061": [], "499": [40, 48, 58], "14770347": [], "645": [], "01273414": [], "716": [], "79358565": [], "26330566": [], "01006969": [], "01022075": [], "01030665": [], "00629352": [], "00624992": [], "pred_std": 16, "835": [], "49747381": [], "857": [], "30893859": [], "875": [], "860194": [], "914": [], "43101602": [], "943": [], "91458467": [], "reason": [16, 37, 64], "929": [], "plot_gpr": [16, 27], "give": [17, 22, 29, 33, 36, 52, 62, 63, 64], "correctli": [17, 64], "white_list": 17, "7937923": [], "2062077": [], "9162567853927612": [], "08374321460723877": [], "ask": 17, "merged_column": 17, "embarkedout": 17, "sexout": 17, "pclassout": 17, "concat_result": [17, 64], "variable2": 17, "variable1": 17, "transformed_column": 17, "probability_tensor": [17, 33, 41, 42], "output_prob": [17, 19, 33], "Not": [17, 64], "origin": [17, 21, 26, 31, 62, 63, 64], "help": 17, "numer": [17, 61, 64], "textual": 17, "num_onnx": 17, "pipeline_titanic_numer": 17, "b": [17, 19, 36, 39, 51, 54, 56, 62, 64], "x08": [17, 36], "x07": 17, "x12": [17, 36], "x08skl2onnx": [17, 36], "x1a": [17, 36], "x061": [17, 36], "x07ai": [17, 36], "x002": [17, 36], "x00": [17, 36], "xcd": 17, "x03": [17, 36], "x03age": 17, "x04fare": 17, "x0emerged_column": 17, "x06concat": 17, "x0b": 17, "x04axi": 17, "x18": [17, 36], "x01": [17, 36], "xa0": [17, 36], "x02": [17, 36], "x08variabl": [17, 36], "x07imput": 17, "x14imputed_value_float": 17, "xe0a": 17, "xd5fa": [], "x06": [17, 36], "x1e": 17, "x14replaced_value_float": 17, "x15": 17, "xc0": [17, 36], "x7f": [17, 36], "nai": [17, 36, 66], "tvariable1": 17, "x06scaler": [17, 36], "x06offset": [17, 36], "u": [40, 62, 64], "xbd": [], "xeba": 17, "xc5": [], "x07b": 17, "x14": 17, "x05scale": [17, 36], "l": [17, 41, 46, 47, 49, 50, 52, 64], "x9e": 36, "x83o": [], "x96": [], "x10pipeline_titan": 17, "x1f": 17, "x10": [17, 36], "xff": 17, "x0cshape_tensorz": 17, "x16": 17, "x06pclass": 17, "x0c": [17, 36], "x01z": 17, "x13": 17, "x03sex": 17, "x08embark": 17, "x01b": [17, 36], "tvariable1b": 17, "x04": [17, 36], "x0bb": [], "x0e": [17, 36], "numx": 17, "11333525": [], "25644025": [], "text_onnx": 17, "pipeline_titanic_textu": 17, "numt": 17, 
"concat": [17, 64], "int": [17, 19, 22, 32, 54, 64], "cats_str": 17, "q": [17, 64], "zero": [17, 52, 55, 64], "onehotencoder1": 17, "femal": 17, "male": 17, "onehotencoder2": 17, "concat1": 17, "imputed_value_float": [17, 64], "28": [17, 32, 39, 55, 64], "42710018157959": [], "replaced_value_float": [17, 64], "nan": [17, 31, 33, 37, 64], "shape_tensor": 17, "29": [17, 30, 32, 39, 44], "46744728088379": [], "33": [17, 32, 44], "94281768798828": [], "07723292708396912": [], "018348461017012596": [], "concat2": 17, "linearclassifi": [17, 33, 41, 64], "classlabels_int": 17, "46486303210258484": [], "00765448110178113": [], "29516637325286865": [], "21072937548160553": [], "3382607102394104": [], "25384777784347534": [], "2484898567199707": [], "2484657764434814": [], "0373810529708862": [], "007246213965117931": [], "0446032285690308": [], "intercept": [17, 64], "18806609511375427": [], "multi_class": [17, 64], "post_transform": [17, 19, 32, 64], "norm": [17, 33, 64], "l1": [17, 64], "classlabels_int64": [17, 19, 64], "int64_data": [17, 32], "sequence_typ": [17, 19], "map_typ": [17, 19], "key_typ": [17, 19], "value_typ": [17, 19], "subgraph": [17, 64], "pipeline_titanic_num": 17, "1229": 17, "2558": [17, 24, 25], "pipeline_titanic_text": 17, "5630": 17, "2735": 17, "315": [], "plot_intermediate_output": [17, 27], "easili": [18, 37, 64, 69], "78697129e": 18, "22639567e": 18, "16935601e": 18, "13765854e": 18, "54566122e": 18, "59543345e": 18, "71459904e": 18, "29410146e": 18, "71520266e": 18, "31533698e": 18, "42040920e": 18, "90474751e": 18, "18177631e": 18, "66363079e": 18, "82714171e": 18, "91632481e": 18, "69544780e": 18, "92676767e": 18, "71450677e": 18, "60454853e": 18, "81570991e": 18, "37130203e": 18, "29353551e": 18, "32392659e": 18, "13256034e": 18, "00982060e": 18, "98303620e": 18, "81811365e": 18, "90930400e": 18, "03950266e": 18, "59803428e": 18, "46831581e": 18, "70903280e": 18, "93109126e": 18, "62244448e": 18, "10619572e": 18, "63308841e": 18, "35869345e": 18, "03930533e": 18, "09485311e": 18, "15669105e": 18, "78040093e": 18, "01347652e": 18, "40159293e": 18, "74891758e": 18, "14564701e": 18, "31817617e": 18, "87010227e": 18, "29090165e": 18, "12032919e": 18, "63901656e": 18, "29285214e": 18, "14384613e": 18, "82919696e": 18, "76885583e": 18, "39046240e": 18, "59100433e": 18, "87444919e": 18, "75119957e": 18, "85595427e": 18, "00911047e": 18, "53068224e": 18, "30369386e": 18, "16": [18, 22, 32, 44, 64], "16970102e": 18, "54267314e": 18, "91291516e": 18, "74676972e": 18, "13960509e": 18, "64555024e": 18, "73696034e": 18, "08026490e": 18, "23721537e": 18, "37859393e": 18, "60754149e": 18, "90967608e": 18, "75628331e": 18, "21238177e": 18, "21796290e": 18, "95051435e": 18, "01835168e": 18, "97046115e": 18, "64772188e": 18, "61898054e": 18, "32016109e": 18, "97701819e": 18, "39030059e": 18, "67779351e": 18, "70107684e": 18, "31443589e": 18, "65991552e": 18, "58633137e": 18, "52940090e": 18, "81737794e": 18, "47187038e": 18, "21588602e": 18, "22346979e": 18, "37364649e": 18, "79895009e": 18, "03710592e": 18, "63278986e": 18, "68918985e": 18, "08509461e": 18, "45039011e": 18, "63479061e": 18, "50727140e": 18, "10449110e": 18, "58384385e": 18, "40399894e": 18, "67878895e": 18, "87647893e": 18, "04332870e": 18, "61919057e": 18, "44538953e": 18, "66380644e": 18, "56583008e": 18, "91237218e": 18, "73950435e": 18, "74122911e": 18, "32524378e": 18, "66956686e": 18, "88790754e": 18, "22119207e": 18, "86775268e": 18, "07041881e": 18, "70805502e": 18, "87088367e": 18, "01154459e": 18, 
"24048193e": 18, "78696918e": 18, "22639418e": 18, "16935596e": 18, "13765850e": 18, "54566121e": 18, "59543371e": 18, "71459913e": 18, "29410172e": 18, "71520233e": 18, "31533718e": 18, "42040539e": 18, "90474844e": 18, "18177342e": 18, "66362762e": 18, "82714128e": 18, "91632557e": 18, "69543815e": 18, "92676806e": 18, "71450746e": 18, "60454881e": 18, "81571263e": 18, "37130213e": 18, "29353619e": 18, "32392645e": 18, "13255882e": 18, "00982118e": 18, "98303699e": 18, "81811404e": 18, "90929934e": 18, "03950286e": 18, "59803450e": 18, "46831572e": 18, "70903301e": 18, "93112069e": 18, "62244260e": 18, "10617606e": 18, "63308799e": 18, "35869288e": 18, "03930473e": 18, "09485388e": 18, "15669155e": 18, "78041705e": 18, "01349142e": 18, "40159237e": 18, "74891722e": 18, "14564866e": 18, "31819277e": 18, "87010169e": 18, "29090086e": 18, "12032884e": 18, "63901585e": 18, "29285216e": 18, "14384818e": 18, "82919535e": 18, "76885669e": 18, "39046200e": 18, "59100525e": 18, "87444773e": 18, "75120188e": 18, "85595278e": 18, "00911009e": 18, "53068203e": 18, "30369331e": 18, "16970128e": 18, "54267330e": 18, "91291523e": 18, "74676971e": 18, "13960505e": 18, "64554977e": 18, "73695993e": 18, "08026457e": 18, "23721600e": 18, "37859321e": 18, "60754204e": 18, "90967607e": 18, "75628328e": 18, "21238220e": 18, "21796322e": 18, "95051479e": 18, "01835155e": 18, "97046089e": 18, "64772224e": 18, "61898088e": 18, "32016134e": 18, "97701883e": 18, "39030147e": 18, "67779541e": 18, "70108199e": 18, "31443739e": 18, "65990937e": 18, "58633217e": 18, "52940059e": 18, "81736946e": 18, "47186923e": 18, "21588576e": 18, "22346997e": 18, "37364638e": 18, "79894984e": 18, "03710651e": 18, "63278937e": 18, "68918991e": 18, "08509481e": 18, "45039046e": 18, "63479507e": 18, "50727105e": 18, "10449100e": 18, "58384484e": 18, "40399802e": 18, "67878950e": 18, "87647831e": 18, "04333529e": 18, "61919039e": 18, "44537946e": 18, "66464688e": 18, "56583288e": 18, "91237196e": 18, "73950393e": 18, "74122953e": 18, "32524411e": 18, "66956298e": 18, "88790704e": 18, "22119273e": 18, "86775194e": 18, "07041626e": 18, "70805526e": 18, "87088423e": 18, "01154475e": 18, "24048182e": 18, "decision_funct": 18, "9999857e": 18, "9906250e": 18, "4855036e": 18, "5576649e": 18, "3256181e": 18, "2131513e": 18, "9896193e": 18, "2251471e": 18, "2387249e": 18, "9815153e": 18, "4764896e": 18, "9999928e": 18, "0581199e": 18, "4929749e": 18, "4862788e": 18, "7568555e": 18, "3902442e": 18, "9589952e": 18, "5052906e": 18, "3060734e": 18, "independetli": 18, "replai": 18, "to_sav": 18, "data_input": 18, "data_output": 18, "del": 18, "pkl": 18, "rb": [18, 23, 40, 64], "restor": 18, "529": 35, "plot_investigate_pipelin": [18, 27], "decisiontreeclassifi": [19, 33, 64], "reserv": [19, 64], "within": [19, 29, 37, 49, 64], "convert_oper": 19, "begin": [19, 64], "n_var": 19, "n_op": 19, "call_convert": 19, "sklearndecisiontreeclassifi": 19, "sklearnzipmap": 19, "_update_domain_vers": 19, "treeensembleclassifi": [19, 64], "class_id": [19, 64], "class_nodeid": 19, "17": [22, 32, 36, 37, 44, 52, 64], "class_treeid": 19, "class_weight": [19, 63, 64], "nodes_falsenodeid": [19, 32], "nodes_featureid": [19, 32], "nodes_hitr": [19, 32], "nodes_missing_value_tracks_tru": [19, 32], "nodes_mod": [19, 32], "branch_leq": [19, 32, 64], "leaf": [19, 32, 64], "nodes_nodeid": [19, 32], "nodes_treeid": [19, 32], "nodes_truenodeid": [19, 32], "nodes_valu": [19, 32], "699999988079071": [], "649999976158142": [], "949999809265137": 19, "5499999523162842": [], 
"849999904632568": [], "5752d2b4e1df4c989ade4627b98731c4": [], "is_root": 19, "_class": 19, "parent": [19, 64], "sequencetyp": 19, "element_typ": 19, "dictionarytyp": 19, "is_evalu": 19, "is_leaf": 19, "is_f": 19, "shape2": 19, "infer_typ": 19, "fed": [19, 64], "falsefals": 19, "truetru": 19, "b5594342172f43ff93b25af6b3c6d224": [], "info": 19, "0b8e513e8eff4c52825f358fdf9594c6": [], "070": [], "plot_log": [19, 27], "relat": [20, 59, 64], "product": [20, 21, 29, 64], "track": 20, "get_exampl": 20, "metadata_prop": 20, "onnxml": 20, "onnxmltool": [20, 24, 25, 43, 44, 45, 55, 59, 60, 61, 65], "0116": 20, "With": [20, 26, 64], "meta": 20, "get_modelmeta": 20, "custom_metadata_map": 20, "descript": [20, 64], "graph_nam": 20, "3c59201b940f410fa29dc71ea9d5767d": 20, "plot_metadata": [20, 27], "factor": [21, 36, 64], "h": [21, 54, 64], "wh": [21, 64], "sim": 21, "m": [21, 54, 59, 60, 64], "m_": 21, "ij": 21, "binari": [21, 57, 64], "j": [17, 21, 54, 64], "bought": 21, "recommand": 21, "address": [21, 62], "complex": [21, 52, 54, 60, 64], "theoret": 21, "gradient": [21, 64], "descent": [21, 64], "onnxarrayfeatureextractor": 21, "onnxmul": [21, 23, 52, 56], "onnxreducesum": 21, "mat": [21, 54], "mod": [21, 60, 64], "components_": [21, 40, 48], "inverse_transform": 21, "89404990914927": [], "30724546335219854": [], "10912546845533133": [], "1066121714582031": [], "1908380123649229": [], "0146740993369194": [], "9848861105125672": [], "947024954574635": [], "15362273167609927": [], "054562734227665666": [], "closer": 21, "row_index": 21, "col_index": 21, "plan": 21, "nmf_to_onnx": 21, "col": [21, 31, 64], "rec": 21, "indices_typ": 21, "ar_arrayfeatureextractorcst1": 21, "ar_z02": 21, "ar_arrayfeatureextractor1": 21, "arrayfeatureextractor": [21, 64], "ar_arrayfeatureextractorcst": 21, "ar_z0": 21, "ar_arrayfeatureextractor": 21, "mu_c0": [21, 23, 33, 37], "mu_mul": [21, 23, 37], "mul": [21, 23, 32, 37, 64], "re_reducesum": 21, "reducesum": [21, 64], "float_data": [21, 23, 32], "9490751028060913": [], "3361228406429291": [], "8783158659934998": [], "8525309562683105": [], "9140868782997131": [], "5677627325057983": [], "45704343914985657": [], "12800176441669464": [], "1552497148513794": [], "06400088220834732": [], "predict_onnx": 21, "row_indic": 21, "col_indic": 21, "onnx_pr": 21, "8940499": [], "3072455": [], "109125465": [], "1066122": [], "19083802": [], "0146742": [], "98488617": [], "94702494": [], "15362275": [], "054562733": [], "graph_nmf": 21, "1654": 21, "846": 21, "290": [], "plot_nmf": [21, 27], "aim": 22, "pythonapioverview": 22, "nicer": 22, "protobuf": 22, "valueinfoproto": 22, "make_tensor_value_info": 22, "node_def": 22, "make_nod": 22, "pad": [22, 40, 64], "mode": [22, 62, 64], "graph_def": 22, "make_graph": 22, "model_def": [22, 33], "make_model": 22, "checker": 22, "check_model": 22, "dynam": [22, 37, 64], "instal": [22, 29, 44, 64], "onnxpad": 22, "pa_pad": 22, "node1": 22, "transpos": [22, 40, 46, 47, 49, 50, 52, 64], "perm": [22, 64], "node2": 22, "original_model": 22, "translat": [22, 64], "onnxtranspos": [22, 52], "dinput": 22, "tr_transposed0": 22, "21": [5, 22, 27, 32, 39, 44], "22": [22, 32, 44, 55], "pipeline_transpose2x": 22, "1524": 22, "056": [], "plot_onnx_oper": [22, 27], "demonstr": [23, 31, 47], "onnxadd": [23, 52, 56], "onnx_fct": 23, "example1": 23, "mu_mulcst": [23, 33, 37], "ad_addcst": [23, 33, 37], "ad_add": [23, 37], "fid": 23, "read": [23, 40, 45, 64], "parsefromstr": 23, "431": 23, "602": 23, "375": [], "plot_pipelin": [23, 27], "long": [24, 25, 42, 43, 
45], "ortfail": 24, "convert_lightgbm": [24, 43, 44, 55], "ind": [24, 25, 31, 42, 43, 45, 55, 63], "shuffl": [24, 25, 42, 43, 45, 55, 64], "lgbm": [24, 25, 42, 43], "n_estim": [24, 25, 29, 32, 33, 42, 43, 44, 45, 55, 56, 64], "lgbmclassifierlgbmclassifi": [24, 43], "lightgbmlgbmclassifi": [24, 33, 43, 55], "nocl": [24, 25, 33, 42, 43, 45, 55, 62], "pipeline_lightgbm": [24, 43], "52034427": [], "24567523": [], "2339805": [], "5203443169593811": [], "2456752359867096": [], "2339804768562317": [], "2549": 24, "598": [], "plot_pipeline_lightgbm": [24, 27], "convert_xgboost": [25, 45, 55], "pipeline_xgboost": [25, 45], "xgboostxgbclassifi": [25, 33, 45, 55], "15358469": [], "44575584": [], "4006595": [], "15358468890190125": [], "44575580954551697": [], "4006594717502594": [], "2485": 25, "661": [], "plot_pipeline_xgboost": [25, 27], "inspir": 26, "replic": 26, "taken": [26, 64], "fetch_20newsgroup": 26, "_twenty_newsgroup": 26, "strip_newsgroup_foot": 26, "strip_newsgroup_quot": 26, "24": [26, 32, 44, 55], "twenty_newsgroup": 26, "truncatedsvd": [26, 63, 64], "feature_extract": [26, 51, 54, 55, 64], "classification_report": 26, "categori": [26, 31, 64], "alt": 26, "atheism": 26, "talk": 26, "religion": 26, "misc": 26, "subset": [26, 64], "extract": [26, 32, 63, 64], "field": [26, 64], "form": [26, 64], "subjectbodyextractor": 26, "subject": [26, 55], "usenet": 26, "post": [26, 41, 64], "pass": [26, 55, 63, 64, 68], "sequenc": [26, 35, 64], "construct": [26, 64], "header": 26, "bod": 26, "partit": 26, "sub": [26, 31, 47, 64], "break": 26, "train_data": [26, 39], "test_data": 26, "union": [26, 55, 64], "min_df": [26, 64], "50": [26, 30, 32, 39, 44, 55, 64], "max_featur": [26, 64], "body_bow": 26, "best": [26, 64], "body_stat": 26, "stat": [26, 55], "textstat": 26, "vect": 26, "dictvector": [26, 64], "transformer_weight": [26, 64], "combin": [26, 40, 64, 68], "linearsvc": [26, 33, 64], "fulli": [26, 29, 31, 63], "recal": 26, "f1": 26, "69": [26, 32], "77": [32, 45], "73": [26, 32, 39, 44, 45], "286": [], "74": 32, "65": [30, 32], "70": [26, 30, 32], "284": [], "accuraci": 26, "71": [26, 30, 32, 39], "570": [26, 40, 45, 58], "macro": 26, "avg": [26, 64], "72": [26, 30, 32, 39], "exact": [26, 62, 63, 64], "tokenis": 26, "come": [26, 42, 52, 63, 64], "gensim": 26, "nltk": 26, "regular": [26, 62, 64], "express": [26, 44, 62], "current": [26, 62, 63, 64, 69], "pipeline_tfidf": 26, "jew": 26, "hide": 26, "keith": 26, "cco": 26, "delet": 26, "nso": 26, "german": 26, "poster": 26, "remark": 26, "anti": 26, "semit": 26, "perhap": 26, "nimpli": 26, "anyon": 26, "germani": 26, "who": 26, "doesn": [26, 64], "agre": [26, 36], "isra": 26, "polici": 26, "nnazi": 26, "prai": 26, "qualifi": 26, "casual": 26, "nif": 26, "term": [26, 61, 64], "bring": 26, "nyour": 26, "bigotri": 26, "shine": 26, "4376964569091797": [], "5623035430908203": [], "71783892": [], "28216108": [], "exactli": [26, 64], "4889": [], "11475": 26, "917": [49, 58], "plot_tfidfvector": [26, 27], "auto_exampl": 27, "mb": [27, 58], "tricki": [28, 37, 58, 60, 70], "countvector": [28, 33, 55, 58, 60, 64, 70], "deal": [28, 36, 58, 63], "tf": [28, 40, 55, 58, 64], "idf": [28, 55, 58, 64], "woe": [28, 31, 58, 60, 70], "appropri": [28, 46, 47, 58, 60, 66], "parser": [28, 31, 42, 56, 58, 60, 64, 68], "catboost": [28, 58, 60, 65], "transfer": [28, 37, 58, 60, 66], "pyod": [28, 58, 60, 67], "iforest": [28, 58, 60, 67], "woeencod": [28, 58], "categorical_encod": [28, 58], "design": [28, 33, 42, 53, 58, 60, 69], "auto_tutorial_python": 28, 
"auto_tutorial_jupyt": 28, "program": [29, 64], "vote": [29, 64], "pyquickhelp": [29, 33, 34, 37, 39, 42, 43, 45, 46, 48, 49, 50, 52, 60, 68], "helpgen": [29, 33, 34, 37, 39, 42, 43, 45, 46, 48, 49, 50, 52], "graphviz_help": [29, 33, 34, 37, 39, 42, 43, 45, 46, 48, 49, 50, 52], "plot_graphviz": [29, 33, 34, 37, 39, 42, 43, 45, 46, 48, 49, 50, 52], "gradientboostingregressor": [29, 30, 64], "randomforestregressor": [29, 30, 33, 64], "votingregressor": [29, 30, 64], "linearregress": [29, 30, 64], "mlprodict": [29, 30, 33, 34, 36, 37, 39, 40, 42, 43, 45, 46, 48, 49, 50, 52, 53, 60], "onnxrt": [29, 30, 33, 34, 37, 39, 42, 43, 45, 46, 48, 49, 50, 52], "onnxinfer": [29, 30, 33, 34, 37, 39, 42, 43, 45, 46, 48, 49, 50, 52], "return_x_i": 29, "reg1": [29, 30], "reg2": [29, 30], "reg3": [29, 30], "ereg": [29, 30], "gb": [29, 30], "rf": [29, 30, 39, 64], "lr": [29, 30], "votingregressorvotingregressor": [29, 30], "gbgradientboostingregressorgradientboostingregressor": [29, 30], "rfrandomforestregressorrandomforestregressor": [29, 30], "lrlinearregressionlinearregress": [29, 30], "argument": [29, 39, 61, 64], "pred_ort": 29, "redict": 29, "126": [], "251785": [], "151": [], "2507": [], "125": 40, "82831": [], "168": [29, 40], "01163": [], "134": [42, 58], "09468": [], "25178359": [], "25070368": [], "82830201": [], "01162825": [], "09468623": [], "p1": [29, 36, 46, 47, 48, 49, 50, 52], "p2": [29, 36, 46, 47, 48, 49, 50, 52], "7576211803316255e": [], "0988937178014946e": [], "absolut": [29, 64], "rel": [29, 64], "margin": [29, 64], "platform": [29, 40, 64], "meant": [29, 64, 68], "oinf": [29, 30, 33, 34, 37, 39, 42, 43, 45, 46, 48, 49, 50, 52], "python_compil": [29, 30, 33, 50], "compiled_run": [29, 33, 50], "dict_input": [29, 33, 50], "yield_op": [29, 33, 50], "notimplementederror": [29, 33, 39, 42, 46, 47, 49, 50, 52], "yields_op": [29, 33, 50], "w0": 29, "var_0": 29, "n0_treeensembleregressor_1": 29, "var_2": 29, "n1_linearregressor": [], "var_1": 29, "n2_treeensembleregressor_1": [], "wvar_1": 29, "n3_mul": 29, "wvar_2": 29, "n4_mul": 29, "wvar_0": 29, "n5_mul": 29, "fvar_2": 29, "n6_flatten": 29, "fvar_1": 29, "n7_flatten": 29, "fvar_0": 29, "n8_flatten": 29, "n9_sum": 29, "pred_pyrt": 29, "graphviz": 29, "to_dot": [29, 33, 34, 37, 39, 42, 43, 45, 46, 48, 49, 50, 52], "get_xaxi": [29, 33, 34, 37, 39, 42, 43, 45, 46, 48, 49, 50, 52], "set_vis": [29, 33, 34, 37, 39, 42, 43, 45, 46, 48, 49, 50, 52], "get_yaxi": [29, 33, 34, 37, 39, 42, 43, 45, 46, 48, 49, 50, 52], "090": [], "plot_abegin_convert_pipelin": [29, 58], "similar": [30, 64], "config_context": 30, "tutori": [30, 40, 49, 59, 61], "11000": 30, "train_siz": 30, "110": [30, 34, 35, 58], "10890": 30, "assume_finit": 30, "per": [30, 35, 44, 57, 64], "belong": [30, 57], "ob": [30, 49, 55], "batch_siz": [30, 64], "mt": [30, 49], "div_by_numb": [30, 49], "mean_ob": 30, "df_skl": 30, "25": [12, 27, 30, 32, 44, 54, 55, 64], "54": [32, 42, 44], "39": [32, 44, 58], "45": 32, "43": 32, "009080": [], "001306": [], "007989": [], "013018": [], "008571": [], "001153": [], "007692": [], "013049": [], "000857": [], "045106": [], "004732": [], "037502": [], "051175": [], "000045": [], "167687": [], "006227": [], "159979": [], "178903": [], "000017": 49, "set_index": [30, 44], "logx": 30, "logi": 30, "mt2": [30, 49], "pyrt": 30, "35": [32, 39, 44, 55, 64], "99": [32, 64], "95": 32, "37": [30, 32, 40], "008857": [], "000859": [], "008090": [], "011717": [], "000040": [], "000097": [], "008234": [], "000477": [], "007522": [], "010283": [], "000823": [], "000013": 
[], "040150": [], "002696": [], "036057": [], "044244": [], "000122": [], "189708": [], "009944": [], "176911": [], "203605": [], "000019": [], "000004": [3, 30, 55], "languag": [30, 46, 48], "openmp": 30, "101": 32, "plot_bbegin_measure_tim": [30, 58], "anyhow": 31, "encod": [31, 64], "sklordinalencod": 31, "category_encod": 31, "util": [31, 42], "woetransform": [31, 57], "405712": 31, "724166": 31, "appear": [31, 45, 64, 70], "ordinal_encod": 31, "ordin": 31, "545531": 31, "961411": 31, "000000": 31, "ordenc_to_sklearn": 31, "op_map": 31, "column_map": 31, "isnan": [31, 64], "iloc": 31, "skl_ord": 31, "categories_": 31, "ordinal_encoder_shape_calcul": 31, "input_typ": [31, 46, 47, 48, 49, 50, 52, 64], "input_dim": [31, 46, 47, 48, 49, 50, 52, 64], "get_first_dimens": [31, 46, 47, 56], "second_dim": 31, "output_typ": [31, 46, 47, 48, 49, 50, 52], "ordinal_encoder_convert": 31, "categoricalencoderordinalencod": 31, "enc": 31, "ord_onx": 31, "woeenc_to_sklearn": 31, "passthrough": [31, 39, 64], "interv": [31, 57, 64], "woe_encoder_pars": 31, "catwo": 31, "woe_encoder_shape_calcul": 31, "woe_encoder_convert": 31, "categoricalencoderwoeencod": 31, "035947": 31, "woe_onx": 31, "4057125": 31, "03594739": 31, "7241662": 31, "162": [], "plot_catwoe_transform": [31, 58], "svc": [32, 33, 64], "break_ti": [32, 64], "argmin": [32, 37, 64], "sometim": 32, "move": [32, 44, 64], "isolationforest": [32, 33, 64], "dig": [32, 39], "make_blob": 32, "cl": [32, 55], "node_sample0_gathercst": 32, "node_sample0_output0": 32, "node_sample0_gath": 32, "gather": [32, 64], "node_sample0_y0": 32, "node_sample0_treeensembleregressor": 32, "treeensembleregressor": [32, 36, 44, 64], "34": [30, 32, 39, 55], "31": [32, 44, 64], "66": [26, 32, 39], "51": [32, 44], "44": [30, 32, 44, 45], "41": 32, "48": 32, "47": [32, 44], "59": 32, "56": [30, 32, 39, 42, 64], "55": [30, 32, 39, 40, 42, 57, 58], "58": [30, 32, 44], "63": 32, "62": 32, "78": [26, 32], "76": 32, "94": [32, 64], "87": 32, "84": 32, "83": [29, 32, 44], "86": [32, 39, 40], "91": 32, "90": [32, 40], "93": 32, "26": [32, 40, 44], "27": [32, 39, 42, 44], "30": [32, 33, 64], "32": [32, 64], "36": [27, 32, 64], "42": [16, 32, 44, 45, 56], "46": [32, 40], "49": 32, "52": [30, 32], "53": 32, "57": [32, 44, 64], "60": [32, 39, 40, 44], "61": [32, 44], "64": [32, 64], "67": [32, 44], "68": [32, 39], "79": [32, 39], "80": [32, 39, 44], "81": 32, "82": 32, "85": [29, 32], "88": 32, "89": 32, "92": [32, 44], "266170501708984": [], "334328651428223": [], "034525394439697": [], "52513599395752": [], "660526275634766": [], "71975040435791": [], "108312606811523": [], "839447021484375": [], "21701192855835": [], "774919509887695": [], "8856401443481445": [], "797633171081543": [], "984252452850342": [], "6772356033325195": [], "704787731170654": [], "40237283706665": [], "371163368225098": [], "6770356893539429": [], "214893817901611": [], "237088203430176": [], "711249351501465": [], "798238754272461": [], "876944065093994": [], "5607099533081055": [], "984869003295898": [], "9046992659568787": [], "560516595840454": [], "38022053241729736": [], "5934302806854248": [], "406733512878418": [], "129587173461914": [], "6596143245697021": [], "8527858257293701": [], "638913154602051": [], "915438175201416": [], "696831703186035": [], "788393974304199": [], "765750408172607": [], "701188564300537": [], "713160514831543": [], "481431484222412": [], "850746154785156": [], "2321271896362305": [], "63899040222168": [], "186695575714111": [], "016422748565674": [], "95450496673584": 
[], "target_id": 32, "target_nodeid": 32, "target_treeid": 32, "target_weight": [32, 64], "node_sample1_output0": 32, "node_sample1_gath": 32, "node_sample2_output0": 32, "node_sample2_gath": 32, "path_length0_output0": 32, "path_length0_cast": 32, "node_sample0_output02": 32, "node_sample0_cast": 32, "node_sample1_y0": 32, "node_sample1_treeensembleregressor": 32, "025412559509277": [], "928980350494385": [], "8938546180725098": [], "706992149353027": [], "9994635581970215": [], "97057056427002": [], "405430793762207": [], "676019668579102": [], "133168697357178": [], "025893211364746": [], "305616855621338": [], "314302444458008": [], "520633697509766": [], "89926815032959": [], "5265917778015137": [], "019530531018972397": [], "2273964881896973": [], "092949628829956": [], "014272689819336": [], "397225856781006": [], "261676549911499": [], "4523181915283203": [], "8018147945404053": [], "2950507402420044": [], "165961265563965": [], "079153537750244": [], "741573333740234": [], "917917251586914": [], "4127278327941895": [], "30768346786499": [], "146634578704834": [], "0040082931518555": [], "009802341461182": [], "448383331298828": [], "801933765411377": [], "012928485870361": [], "983423233032227": [], "4175920486450195": [], "671760082244873": [], "9853739738464355": [], "792556285858154": [], "745481967926025": [], "081775665283203": [], "793870449066162": [], "94163179397583": [], "232661724090576": [], "node_sample2_y0": 32, "node_sample2_treeensembleregressor": 32, "8356852531433105": [], "123292922973633": [], "611776828765869": [], "791582107543945": [], "509452819824219": [], "640423774719238": [], "780087471008301": [], "965262413024902": [], "822331428527832": [], "354574203491211": [], "922163486480713": [], "886270999908447": [], "329177856445312": [], "452418327331543": [], "01196002960205": [], "424698829650879": [], "655773162841797": [], "102919578552246": [], "830965042114258": [], "4484344720840454": [], "252330303192139": [], "8563392162323": [], "081825256347656": [], "8082351684570312": [], "509328365325928": [], "933426856994629": [], "2355732917785645": [], "369994640350342": [], "744357109069824": [], "534114360809326": [], "335564613342285": [], "441437244415283": [], "256015777587891": [], "434825897216797": [], "37266731262207": [], "551617622375488": [], "57167387008667": [], "383504390716553": [], "611318588256836": [], "944201469421387": [], "20792293548584": [], "6681013107299805": [], "453298091888428": [], "935305118560791": [], "287935256958008": [], "path_length0_y0": 32, "path_length0_labelencod": 32, "labelencod": [32, 64], "keys_int64": 32, "values_float": 32, "node_sample0_y02": 32, "node_sample0_labelencod": 32, "path_length1_output0": 32, "path_length1_cast": 32, "node_sample1_output02": 32, "node_sample1_cast": 32, "path_length2_output0": 32, "path_length2_cast": 32, "node_sample2_output02": 32, "node_sample2_cast": 32, "path_length0_reshapecst": 32, "path_length0_reshaped0": 32, "path_length0_reshap": 32, "allowzero": [32, 64], "node_sample0_reshaped0": 32, "node_sample0_reshap": 32, "path_length1_y0": 32, "path_length1_labelencod": 32, "node_sample1_y02": 32, "node_sample1_labelencod": 32, "path_length2_y0": 32, "path_length2_labelencod": 32, "node_sample2_y02": 32, "node_sample2_labelencod": 32, "dec_powcst": 32, "plus2_0_c0": 32, "plus2_0_great": 32, "greater": [32, 64], "eq2_0_c0": 32, "eq2_0_equ": 32, "equal": [32, 59, 64], "path_length1_reshaped0": 32, "path_length1_reshap": 32, "node_sample1_reshaped0": 32, "node_sample1_reshap": 32, 
"path_length2_reshaped0": 32, "path_length2_reshap": 32, "node_sample2_reshaped0": 32, "node_sample2_reshap": 32, "plus2_2_c0": 32, "plus2_2_great": 32, "eq2_0_output0": 32, "eq2_0_cast": 32, "plus2_0_output0": 32, "plus2_0_cast": 32, "eq2_1_c0": 32, "eq2_1_equ": 32, "plus2_1_c0": 32, "plus2_1_great": 32, "eq2_2_c0": 32, "eq2_2_equ": 32, "eqp2ps0_c0": 32, "eqp2ps0_mul": 32, "plus2_2_output0": 32, "plus2_2_cast": 32, "eq2_1_output0": 32, "eq2_1_cast": 32, "plus2_1_output0": 32, "plus2_1_cast": 32, "eq2_2_output0": 32, "eq2_2_cast": 32, "eqp2p_m1_0_addcst": 32, "eqp2p_m1_0_c0": 32, "eqp2p_m1_0_add": 32, "eqp2p_m1_0_maxcst": 32, "eqp_ns0_max0": 32, "eqp_ns0_max": 32, "eqp2ps1_c0": 32, "eqp2ps1_mul": 32, "eqp2ps2_c0": 32, "eqp2ps2_mul": 32, "eqp2p_m1_0_max0": 32, "eqp2p_m1_0_max": 32, "eqp2p_m1_0_maxcst1": 32, "eqp2p_m1_0_max02": 32, "eqp2p_m1_0_max1": 32, "eqp2p_m1_1_c0": 32, "eqp2p_m1_1_add": 32, "eqp_ns1_max0": 32, "eqp_ns1_max": 32, "eqp2p_m1_2_c0": 32, "eqp2p_m1_2_add": 32, "eqp_ns2_max0": 32, "eqp_ns2_max": 32, "eqp_log0_output0": 32, "eqp_log0_log": 32, "eqp_ns0_c01": 32, "eqp_ns0_div": 32, "eqp2p_m1_1_max0": 32, "eqp2p_m1_1_max": 32, "eqp2p_m1_1_max02": 32, "eqp2p_m1_1_max1": 32, "eqp2p_m1_2_max0": 32, "eqp2p_m1_2_max": 32, "eqp2p_m1_2_max02": 32, "eqp2p_m1_2_max1": 32, "eqp_log0_addcst": 32, "eqp_log0_c01": 32, "eqp_log0_add": 32, "eqp_ns0_mulcst": 32, "eqp_ns0_c0": 32, "eqp_ns0_mul": 32, "eqp_log1_output0": 32, "eqp_log1_log": 32, "eqp_ns1_c01": 32, "eqp_ns1_div": 32, "eqp_ns2_c01": 32, "eqp_ns2_div": 32, "eqp_log2_output0": 32, "eqp_log2_log": 32, "eqp_log0_c0": 32, "eqp_log0_mul": 32, "eqp_log1_c01": 32, "eqp_log1_add": 32, "eqp_ns1_c0": 32, "eqp_ns1_mul": 32, "eqp_log2_c01": 32, "eqp_log2_add": 32, "eqp_ns2_c0": 32, "eqp_ns2_mul": 32, "avlog0_c01": 32, "avlog0_add": 32, "eqp_log1_c0": 32, "eqp_log1_mul": 32, "eqp_log2_c0": 32, "eqp_log2_mul": 32, "avlog0_c0": 32, "avlog0_mul": 32, "avlog1_c01": 32, "avlog1_add": 32, "avlog2_c01": 32, "avlog2_add": 32, "avpl0_c0": 32, "avpl0_add": 32, "avlog1_c0": 32, "avlog1_mul": 32, "avlog2_c0": 32, "avlog2_mul": 32, "depth0_c01": 32, "depth0_add": 32, "avpl1_c0": 32, "avpl1_add": 32, "avpl2_c0": 32, "avpl2_add": 32, "depth0_c0": 32, "depth0_add1": 32, "depth1_c01": 32, "depth1_add": 32, "depth2_c01": 32, "depth2_add": 32, "depth1_c0": 32, "depth1_add1": 32, "depth2_c0": 32, "depth2_add1": 32, "dec_sum0": 32, "dec_sum": 32, "dec_divcst": 32, "dec_c0": 32, "dec_div": 32, "dec_y01": 32, "dec_neg": 32, "neg": [32, 33, 64], "dec_z0": 32, "dec_pow": 32, "pow": [32, 64], "dec_y0": 32, "dec_neg1": 32, "dec_addcst": 32, "dec_add": 32, "predict_c01": 32, "predict_less": 32, "less": [32, 47, 64], "predict_output0": 32, "predict_cast": 32, "predict_mulcst": 32, "predict_c0": 32, "predict_mul": 32, "predict_addcst": 32, "predict_add": 32, "5772156715393066": 32, "094013214111328": 32, "dom": 32, "older": 32, "get_domain_opset": 32, "1405": 32, "userwarn": [32, 35, 55], "becam": 32, "opset_ml": 32, "top": [32, 64], "653": [], "plot_cbegin_opset": [32, 58], "been": [33, 62, 64], "newer": 33, "speed": [33, 52, 64], "ration": 33, "choic": [33, 35, 59, 64], "probabilit": 33, "purpos": [33, 63], "signific": [33, 36, 44], "sens": [33, 37], "pformat": 33, "minmaxscal": [33, 56, 64], "_converter_pool": 33, "n0_linearclassifi": 33, "n1_normal": 33, "visual": [33, 49], "visualis": 33, "n1_cast": 33, "n2_normal": 33, "n3_zipmap": 33, "flaw": 33, "pickabl": 33, "convent": [33, 64], "clr__zipmap": 33, "ca_output0": 33, "n0_cast": 33, "n1_mul": 33, "n2_add": 33, 
"n3_linearclassifi": 33, "n4_normal": 33, "unscal": 33, "88268626": 33, "10948393": 33, "00782984": 33, "7944385": 33, "19728662": 33, "00827491": 33, "85557765": 33, "13792053": 33, "00650185": 33, "8262804": 33, "16634221": 33, "00737737": 33, "90050155": 33, "092388": 33, "00711049": 33, "did": [33, 38], "2707398": 33, "18354762": 33, "4542873": 33, "9857951": 33, "5928172": 33, "5786123": 33, "2349296": 33, "4098304": 33, "6447601": 33, "1071343": 33, "5042473": 33, "6113818": 33, "3727787": 33, "095824": 33, "4686027": 33, "clr__raw_scor": 33, "figur": [33, 64], "clrrf": 33, "n_nodes_ptr": 33, "todens": [33, 51, 54], "10101": 33, "detect": [33, 64], "all_opt": 33, "opt": 33, "get_allowed_opt": 33, "isinst": [33, 42], "lgbmclassifi": [33, 55], "lightgbmboost": 33, "lightgbmlgbmregressor": [33, 44], "skl2onnxtraceablecountvector": 33, "tokenexp": [33, 62], "keep_empty_str": 33, "skl2onnxtraceabletfidfvector": 33, "adaboostclassifi": [33, 64], "baggingclassifi": [33, 64], "bayesiangaussianmixtur": [33, 64], "bayesianridg": [33, 64], "return_std": 33, "bernoullinb": [33, 64], "calibratedclassifiercv": [33, 64], "categoricalnb": [33, 64], "complementnb": [33, 64], "decision_leaf": 33, "extratreeclassifi": [33, 64], "extratreeregressor": [33, 64], "extratreesclassifi": [33, 64], "extratreesregressor": [33, 64], "gaussianmixtur": [33, 64], "gaussiannb": [33, 64], "gaussianprocessclassifi": [33, 64], "return_cov": 33, "gradientboostingclassifi": [33, 64], "histgradientboostingclassifi": [33, 55, 64], "histgradientboostingregressor": [33, 64], "kmean": [33, 37, 64], "gemm": [33, 37, 49, 50, 64], "knnimput": [33, 64], "kneighborsclassifi": [33, 64], "kneighborstransform": [33, 64], "kernelpca": [33, 64], "localoutlierfactor": [33, 64], "mlpclassifi": [33, 64], "maxabsscal": [33, 64], "minibatchkmean": [33, 64], "multioutputclassifi": [33, 64], "multinomialnb": [33, 64], "nearestneighbor": [33, 64], "onevsoneclassifi": [33, 64], "onevsrestclassifi": [33, 64], "quadraticdiscriminantanalysi": [33, 64], "radiusneighborsclassifi": [33, 64], "radiusneighborsregressor": [33, 64], "robustscal": [33, 64], "stackingclassifi": [33, 64], "tfidftransform": [33, 55, 64], "votingclassifi": [33, 64], "wrappedlightgbmboosterclassifi": 33, "fct_score_cdist_sum": 33, "919": [], "plot_dbegin_opt": [33, 58], "97070102": [], "15498525": [], "63777445": [], "4176215": [], "5097664": [], "9707017": [], "154985": [], "63777494": [], "4176211": [], "5097675": [], "2162423109999736": [], "2573255129999552": [], "184": [], "plot_dbegin_options_list": [34, 58], "slower": [35, 44], "multioutput": [35, 64], "9853860139846802": [], "014613897539675236": [], "815294284479023e": [], "00010922097135335207": [], "13160039484500885": [], "868290364742279": [], "8538601e": [], "4613898e": [], "8152943e": [], "0922097e": [], "3160039e": [], "6829036e": [], "i10": 35, "i12": 35, "0146139": [], "1316004": [], "i14": 35, "007510155599993596": [], "0021944886999904155": [], "0030343784000024243": [], "lost": 35, "onx4": [35, 36], "sess4": [35, 36], "res4": 35, "007204065699988859": [], "problem": [35, 63], "vstack": [35, 40], "onx5": 35, "sess5": 35, "res5": 35, "7305548e": [], "6944404e": [], "9889363e": [], "5507628e": [], "4923458e": [], "3414233e": [], "8431313e": [], "5686885e": [], "2535330e": [], "6916695e": [], "9393591e": [], "7253415e": [], "8142220e": [], "5142562e": [], "5151397e": [], "6824331e": [], "6338608e": [], "3488331e": [], "9189948e": [], "9994485e": [], "3592375e": [], "back": [35, 38, 64], "onx6": 35, "sess6": 
35, "res6": 35, "114": 35, "714": [], "plot_dbegin_options_zipmap": [35, 58], "facilit": 36, "deploy": 36, "harm": 36, "dy": [36, 64], "dx": [36, 64], "delta": [36, 64], "leqslant": 36, "sup_x": 36, "left": [36, 44, 57, 64], "vert": 36, "right": [36, 40, 44, 57, 64], "howev": [36, 41, 42, 59, 62, 63, 64], "huge": 36, "overcom": 36, "built": [36, 37], "round": [36, 40, 64], "comparison": [36, 37, 64], "f32": 36, "null": [36, 55], "discord": 36, "area": 36, "onnxpipelin": 36, "area_mismatch_rul": 36, "rule": [36, 62, 64], "xst": 36, "yst": 36, "xsf": 36, "ysf": 36, "c1": [36, 55], "c2": 36, "36e": 36, "disagre": 36, "region": [36, 57, 64], "set_xlabel": 36, "set_ylabel": 36, "legend": 36, "0x7fcf24d92e00": [], "frequent": 36, "categor": 36, "yi_test": 36, "decisiontreeregressordecisiontreeregressor": 36, "226": 40, "75539265327654": [], "779099289574646": [], "everywher": [36, 64], "epkg": [36, 38, 40, 42, 45], "compromis": 36, "casttransformercasttransform": 36, "skl2": 36, "ort2": 36, "779099289574624": [], "model3": 36, "skl3": 36, "ort3": 36, "02029352496902e": [], "774273626125685e": [], "discontinu": [36, 64], "along": [36, 55, 64], "idea": [36, 64], "model_onx": 36, "onnxtransform": [36, 40], "onnx_byt": 36, "quot": 36, "xf6": 36, "xa6": 36, "x01x": 36, "gt": [36, 42], "xc3": [], "x99": [], "xbb": [], "lt": [36, 42], "xc3d": [], "xa3": [], "xbe": 36, "xa1g3": [], "xbf": 36, "xe5": [], "xab": [], "xf7": [], "xae": [], "xf1": 36, "xaat": [], "xbc": [], "x9d": [], "x95": [], "x8a": [], "x83": [], "xb6": [], "xce": [], "x80": 36, "xc7": [], "x1b": [], "x1emlprodict_onnx": 36, "x11": 36, "nb": 36, "onnxpipelineonnxpipelin": 36, "onnxtransformeronnxtransform": 36, "x08skl2on": 36, "enforce_float32": 36, "lowerss": 36, "skl4": 36, "ort4": 36, "7606905291577277e": [], "855": [], "plot_ebegin_float_doubl": [36, 58], "alter": [37, 62, 63], "layer": [37, 64], "neural": [37, 40, 64], "network": [37, 40, 64], "reaason": 37, "often": [37, 40, 52, 64], "mismatch": [37, 63], "overload": [37, 64], "km": 37, "n_init": [37, 64], "kmeanskmean": 37, "popul": [37, 64], "cach": 37, "12119834": [], "21295824": 37, "98940603": [], "6755083": [], "99604549": 37, "01793312": [], "97416665": [], "65198444": 37, "19343668": [], "88014429": [], "9034561": 37, "19784749": [], "30022609": [], "40215457": 37, "11157152": [], "50554424": [], "21154793": 37, "89893116": [], "14856384": [], "50244932": 37, "21638048": [], "99184826": [], "09132468": 37, "97313411": [], "92515933": [], "42174651": 37, "40757189": [], "79398956": [], "78993078": 37, "05764261": [], "32125333": [], "78999385": 37, "92088109": [], "0493632": [], "27618123": 37, "07853631": [], "80635045": [], "03497888": 37, "16440431": [], "21220972": [], "33482453": 37, "63069748": [], "88834965": [], "63865558": 37, "14619343": [], "4998303": [], "39898792": 37, "49547518": [], "60978017": [], "20748818": 37, "02966144": [], "05594182": [], "21618828": 37, "91388548": [], "34493953": [], "20986655": 37, "72562039": [], "50065397": [], "86706182": 37, "10101938": [], "80825681": [], "50401564": 37, "66383713": [], "27800809": [], "66826437": 37, "94496718": [], "58990876": [], "68658071": 37, "51061335": [], "55934697": [], "47945627": 37, "57996434": [], "96493153": [], "36345425": 37, "98817445": [], "55682739": [], "99023912": 37, "88431906": [], "8279719": [], "22683089": 37, "79088782": [], "05970831": [], "2947186": 37, "89539875": [], "95425291": [], "25361098": 37, "88085622": [], "87745051": [], "65019824": 37, "09851673": [], "73238773": [], 
"80138328": 37, "01796142": [], "73361981": [], "52309257": 37, "57350896": [], "11853014": [], "57658655": 37, "5037664": [], "22845606": [], "87652483": 37, "4465301": [], "71452112": [], "76858489": 37, "97906378": [], "86508665": [], "54896332": 37, "01986385": [], "0573692": [], "63079314": 37, "80064093": [], "40284985": [], "45982568": 37, "25136846": [], "00742655": [], "2336976": 37, "42052558": [], "95472117": [], "14580827": 37, "90865188": [], "12324651": [], "20261743": 37, "01192633": [], "90164193": [], "67055552": 37, "64398605": [], "15411688": [], "90927099": 37, "42154566": [], "8613548": [], "50081008": 37, "70483773": [], "34606471": [], "92159916": 37, "9078554": [], "65231058": [], "01946042": 37, "01421067": [], "53206587": [], "86953764": 37, "14238152": [], "99813103": [], "72275914": 37, "23577398": [], "34116935": [], "72324305": 37, "97409784": [], "90222887": [], "30295342": 37, "97223984": [], "9003878": [], "43619989": 37, "95288059": [], "41851492": [], "97232682": 37, "99352148": [], "68457079": [], "51850037": 37, "72661726": [], "96940962": [], "33264308": 37, "69898424": [], "9112523": [], "35747592": 37, "11074501": [], "35721918": [], "77550662": 37, "8143491": [], "59351202": [], "01808184": 37, "00650285": [], "50213315": [], "77360088": 37, "31296552": [], "11632078": [], "21148368": 37, "14114175": [], "77921299": [], "66294828": 37, "42994048": [], "97194958": [], "62389817": 37, "73666782": [], "77530513": [], "70011145": 37, "45918639": [], "25941769": [], "53658932": 37, "74268279": [], "66155141": [], "98813829": 37, "28976474": [], "73833453": [], "32311723": 37, "05251547": [], "46572707": [], "14311522": 37, "98780965": [], "80185102": [], "68234835": 37, "67700171": [], "568386": [], "63954211": 37, "12682734": [], "19987895": [], "97369206": 37, "33743839": [], "67881532": [], "87494798": 37, "46667974": [], "34222961": [], "03853641": 37, "1880022": [], "53061062": [], "8022861": 37, "63233668": [], "79234309": [], "68305664": 37, "65142259": [], "57371215": [], "96833851": 37, "54593744": [], "90589785": [], "9760862": 37, "2933375": [], "22490527": [], "13002382": 37, "03085926": [], "26783271": [], "56679427": 37, "09304603": [], "42114042": [], "5903606": 37, "52050254": [], "58974672": [], "93839428": 37, "34712856": [], "76432091": [], "58203512": 37, "44164622": [], "89738242": [], "99796537": 37, "69027665": [], "98549851": [], "92597852": 37, "76965187": [], "3921368": [], "68907313": 37, "02829879": [], "54223583": [], "42215998": 37, "4211892": [], "90567816": [], "62771445": 37, "88799766": [], "70872911": [], "75915071": 37, "39853465": [], "48190142": [], "30075052": 37, "78009974": [], "06129323": [], "73017167": 37, "2083069": [], "81863359": [], "37943811": 37, "87666989": [], "599882": [], "98789866": 37, "41035271": [], "4914813": [], "89079656": 37, "26782134": [], "84409423": [], "86642713": 37, "25085451": [], "38941349": [], "86642575": 37, "11791607": [], "53271026": [], "96966239": 37, "35089399": [], "30831638": [], "77003779": 37, "05312152": [], "81726253": [], "38255534": 37, "83091351": [], "56428027": [], "55559903": 37, "80454586": [], "72672271": [], "8455521": 37, "39825227": [], "28805849": [], "56987887": 37, "06324547": [], "38163798": [], "64007308": 37, "89861511": [], "31271244": [], "24274589": 37, "0584579": [], "76585766": [], "57067982": 37, "5185265": [], "14762671": [], "44150237": 37, "52472": [], "17645413": [], "69480186": 37, "77236486": [], "73594932": [], "11613683": 37, "53031563": 
[], "78128346": [], "03326801": 37, "2022172": [], "22550604": [], "3503222": 37, "74462238": [], "2426558": [], "577021": 37, "92275933": [], "50462864": [], "363498": 37, "40314162": [], "22975724": [], "79334275": 37, "48323372": [], "71837714": [], "62749566": 37, "4787491": [], "10409694": [], "89360823": 37, "0325986": [], "80475907": [], "1132966": 37, "27818948": [], "94858807": [], "82688169": 37, "91870424": [], "39433359": [], "91538879": 37, "49910975": [], "90677079": [], "89835633": 37, "68622715": [], "39713702": [], "70128288": 37, "46463058": [], "85224062": [], "18341242": 37, "10127163": [], "95786451": [], "58136629": 37, "83092395": [], "17790381": [], "02615768": 37, "37017622": [], "27442972": [], "31907679": 37, "52540209": [], "91211061": [], "4288432": 37, "62249456": [], "77937737": [], "19031307": 37, "47042293": [], "84735471": [], "64273089": 37, "15814207": [], "15695444": [], "00723617": 37, "520093": [], "33581345": [], "2637671": 37, "66660166": [], "79774043": [], "45930032": 37, "08324891": [], "022307": [], "27575645": 37, "94925151": [], "3842265": [], "05342943": 37, "84098317": [], "03854964": [], "1585729": 37, "75748198": [], "28297732": [], "71100584": 37, "07124861": [], "88774921": [], "12224641": 37, "17345728": [], "47357101": [], "13401784": 37, "87682321": [], "7964005": [], "39830644": 37, "11534598": [], "80521086": [], "63719075": 37, "59782917": [], "8607372": [], "08776655": 37, "25982873": [], "3101089": [], "00416552": 37, "07214028": [], "46990247": [], "58815834": 37, "51434392": [], "97017134": [], "19454679": 37, "0762733": [], "97333575": [], "09907253": 37, "23050145": [], "07939567": [], "28416057": 37, "57373487": [], "06609741": [], "17402084": 37, "51130902": [], "24723796": [], "32128686": 37, "54141867": [], "42521977": [], "3480018": 37, "85128501": [], "82594618": [], "1240495": 37, "52475835": [], "03093862": [], "97564407": 37, "52100812": [], "44892686": [], "7539635": 37, "44371189": [], "17585453": [], "7969924": 37, "08437101": [], "00508668": [], "25638099": 37, "13739231": [], "captur": 37, "truncat": [37, 64], "799262827148709e": 37, "095537650763756e": 37, "flog": 37, "ki": 37, "9830552339553833": [], "035177230834961": 37, "ge_gemmcst": 37, "3049873113632202": 37, "1359702348709106": [], "kr": 37, "340226411819458": 37, "0190045833587646": 37, "reducesumsquar": [37, 64], "re_reduced0": 37, "re_reducesumsquar": 37, "850505828857422": 37, "376197338104248": 37, "ge_y0": 37, "ge_gemm": 37, "366023063659668": 37, "967348575592041": [], "ad_c01": 37, "98982572555542": 37, "817853927612305": [], "ad_c0": 37, "ad_add1": 37, "045351505279541016": 37, "143783569335938": [], "sqrt": [37, 64], "sq_sqrt": 37, "2129589319229126": 37, "017932891845703": [], "ar_argmin": 37, "1211984": [], "21295893": 37, "9894059": [], "675508": [], "99604493": 37, "017933": [], "98305523": [], "035177": 37, "32593": [], "05021989": [], "8833765": [], "34773782": [], "2815273": [], "0145789": 37, "85326266": 37, "3049873": 37, "2548935": 37, "1359702": [], "08842168": [], "9961545": [], "0175261": [], "kv": 37, "20000000298023224": [37, 64], "099999904632568": 37, "9006812": 37, "0190046": 37, "3402264": 37, "3154442": 37, "1430167": 37, "13197924": 37, "3761973": 37, "850506": 37, "3826268": [], "366023": 37, "2132325": [], "3247827": [], "893578": 37, "9673486": [], "758824": [], "9898257": 37, "58943": [], "1752887": [], "0430717": 37, "817854": [], "741879": [], "04535151": 37, "9153595": [], "158344": [], "9921055": 37, "143784": [], 
"onnx_ml_pb2": 37, "mi": [37, 54], "581": [], "plot_fbegin_investig": [37, 58], "vocabulari": [38, 51, 54, 64], "promot": 38, "zoo": [38, 40], "add_output_initi": 38, "penalti": [38, 64], "elasticnet": [38, 64], "saga": 38, "l1_ratio": [38, 64], "_sag": 38, "350": 38, "coef_": [38, 46, 47, 48, 49, 50, 52], "new_onx": 38, "9050272e": [], "4973426e": [], "3626854e": [], "1473353e": [], "8225316e": [], "6176320e": [], "major": [38, 64], "those": [38, 64], "simple_onx": 38, "memori": [38, 64], "never": [38, 64], "simplified_model": 38, "035": [], "plot_gbegin_cst": [38, 58], "ingest": [39, 52], "held": 39, "pipeline2dot": 39, "onnx_conv": 39, "guess_schema_from_data": 39, "to_onnx_ext": 39, "cat1": 39, "cat2": 39, "num1": 39, "num2": 39, "cat_col": 39, "remaind": [39, 64], "passthroughpassthroughrandomforestclassifierrandomforestclassifi": 39, "incompat": 39, "arg0": 39, "arg1": 39, "arg2": 39, "runopt": 39, "0x7fcf24b71030": [], "shortcut": [39, 52], "probil": 39, "83000004": [], "16999999": [], "26999998": 39, "68000007": [], "31999996": [], "2299999": [], "7700001": [], "ok": [39, 52], "seen": [39, 64], "becom": [39, 44, 47, 61, 64, 67], "2700004": [], "7299996": [], "23000044": [], "76999956": [], "3700003": [], "6299997": [], "517": [], "plot_gbegin_datafram": [39, 58], "pytorch": [40, 64], "skorch": 40, "kera": 40, "tensorflow": [40, 64], "wrapper": [40, 62], "scikit_learn": 40, "sy": 40, "io": [40, 46, 48, 52], "bytesio": [40, 46, 48, 52], "plot_gallery_imag": 40, "imagenet_class": 40, "class_nam": 40, "pil": 40, "urllib": 40, "request": [40, 46, 48, 61, 64], "download_fil": 40, "url": [40, 54], "min_siz": 40, "urlopen": 40, "squeezenet1": 40, "url_nam": 40, "vision": 40, "classif": [40, 64], "squeezenet": 40, "exit": 40, "4956208": [], "inp": 40, "nodearg": 40, "224": 40, "seri": [40, 57, 64], "upload": 40, "wikimedia": 40, "wikipedia": [40, 64], "d2": [40, 64], "east_coker_elm": 40, "2c_2": 40, "jpg": 40, "im0": 40, "im": 40, "resiz": [40, 64], "712230": [], "predect": 40, "im2arrai": 40, "asarrai": 40, "145": 40, "59459": 40, "06765": 40, "599808": 40, "293957": 40, "982475": 40, "interpret": [40, 64], "84172": 40, "samoi": 40, "samoyed": 40, "212": 40, "0366": 40, "park": 40, "bench": 40, "225": 40, "50687": 40, "lakesid": 40, "lakeshor": 40, "232": 40, "90251": 40, "fountain": 40, "258": 40, "10965": 40, "geyser": 40, "rotat": 40, "answer": [40, 56], "angl": 40, "prob": [40, 64], "pl": 40, "revers": [40, 64], "climg": 40, "247": 40, "06139": 40, "obelisk": 40, "238": 40, "9538": 40, "car": 40, "mirror": [40, 64], "235": 40, "27649": 40, "flagpol": 40, "flagstaff": 40, "231": 40, "5171": 40, "window": [40, 64], "screen": 40, "230": 40, "90662": 40, "picket": 40, "fenc": 40, "pale": 40, "254": 40, "24677": 40, "251": 40, "51357": 40, "10507": 40, "groom": 40, "bridegroom": 40, "234": 40, "52951": 40, "13913": 40, "church": 40, "5695": 40, "59703": 40, "46768": 40, "221": 40, "46794": 40, "220": 40, "88501": 40, "265": 40, "508": 40, "243": 40, "68616": 40, "9296": 40, "73679": 40, "pedest": 40, "plinth": 40, "footstal": 40, "11945": 40, "pyrene": 40, "287": 40, "74472": 40, "255": [40, 50, 58, 64], "25317": 40, "236": 40, "84944": 40, "223": 40, "02904": 40, "222": 40, "80466": 40, "267": [35, 40, 58], "6353": 40, "4896": 40, "214": [29, 40], "64238": 40, "56232": 40, "mobil": 40, "manufactur": 40, "213": 40, "12415": 40, "74826": 40, "38455": 40, "24373": 40, "198": 40, "37132": 40, "beacon": 40, "lighthous": 40, "light": 40, "pharo": 40, "197": 40, "4381": 40, "34743": 40, "209": 
40, "60362": 40, "207": 40, "06918": 40, "american": 40, "egret": 40, "heron": 40, "egretta": 40, "albu": 40, "201": 40, "63097": 40, "7567": 40, "9874": 40, "216": 40, "63417": 40, "73239": 40, "60934": 40, "46214": 40, "swim": 40, "trunk": 40, "bath": 40, "253": 40, "32697": 40, "6997": 40, "golf": 40, "ball": 40, "50497": 40, "36351": 40, "sulphur": 40, "crest": 40, "cockatoo": 40, "kakato": 40, "galerita": 40, "cacatua": 40, "217": 40, "73135": 40, "244": 40, "30106": 40, "solar": 40, "dish": 40, "collector": 40, "furnac": 40, "239": 40, "57333": 40, "92139": 40, "62112": 40, "87936": 40, "crt": 40, "consist": [40, 57, 63, 64], "projet": 40, "model_byt": 40, "onnxruntime1": 40, "change_batch_s": 40, "proj": 40, "676": 40, "5762": [], "203": 40, "35454": 40, "6654": [], "208": 40, "0977": [], "339": 40, "8116": [], "33977": [], "555634": [], "44832": [], "22363": [], "157": 40, "61395": 40, "596": 40, "3861": [], "21027": [], "918": 40, "86115": [], "33976": [], "87164": [], "128": [40, 64], "2727": [], "306": 40, "68555": [], "156": 40, "42906": [], "912056": [], "119": 40, "21836": [], "446": 40, "6046": [], "342": 40, "45865": [], "504": 40, "90247": [], "02553": [], "0f": [40, 64], "el": 40, "annot": 40, "xy": 40, "xytext": 40, "fontsiz": 40, "textcoord": 40, "va": [40, 63], "bottom": 40, "bbox": 40, "boxstyl": 40, "fc": 40, "arrowprop": 40, "arrowstyl": 40, "connectionstyl": 40, "arc3": 40, "rad": 40, "squeezenet0_conv0_fwd": 40, "squeezenet0_relu0_fwd": 40, "squeezenet0_pool0_fwd": 40, "squeezenet0_conv1_fwd": 40, "squeezenet0_relu1_fwd": 40, "squeezenet0_conv2_fwd": 40, "squeezenet0_relu2_fwd": 40, "squeezenet0_conv3_fwd": 40, "squeezenet0_relu3_fwd": 40, "squeezenet0_concat0": 40, "squeezenet0_conv4_fwd": 40, "squeezenet0_relu4_fwd": 40, "squeezenet0_conv5_fwd": 40, "squeezenet0_relu5_fwd": 40, "squeezenet0_conv6_fwd": 40, "squeezenet0_relu6_fwd": 40, "squeezenet0_concat1": 40, "squeezenet0_pool1_fwd": 40, "squeezenet0_conv7_fwd": 40, "squeezenet0_relu7_fwd": 40, "squeezenet0_conv8_fwd": 40, "squeezenet0_relu8_fwd": 40, "squeezenet0_conv9_fwd": 40, "squeezenet0_relu9_fwd": 40, "squeezenet0_concat2": 40, "squeezenet0_conv10_fwd": 40, "squeezenet0_relu10_fwd": 40, "squeezenet0_conv11_fwd": 40, "squeezenet0_relu11_fwd": 40, "squeezenet0_conv12_fwd": 40, "squeezenet0_relu12_fwd": 40, "squeezenet0_concat3": 40, "squeezenet0_pool2_fwd": 40, "squeezenet0_conv13_fwd": 40, "squeezenet0_relu13_fwd": 40, "squeezenet0_conv14_fwd": 40, "squeezenet0_relu14_fwd": 40, "squeezenet0_conv15_fwd": 40, "squeezenet0_relu15_fwd": 40, "squeezenet0_concat4": 40, "squeezenet0_conv16_fwd": 40, "squeezenet0_relu16_fwd": 40, "squeezenet0_conv17_fwd": 40, "squeezenet0_relu17_fwd": 40, "squeezenet0_conv18_fwd": 40, "squeezenet0_relu18_fwd": 40, "squeezenet0_concat5": 40, "squeezenet0_conv19_fwd": 40, "squeezenet0_relu19_fwd": 40, "squeezenet0_conv20_fwd": 40, "squeezenet0_relu20_fwd": 40, "squeezenet0_conv21_fwd": 40, "squeezenet0_relu21_fwd": 40, "squeezenet0_concat6": 40, "squeezenet0_conv22_fwd": 40, "squeezenet0_relu22_fwd": 40, "squeezenet0_conv23_fwd": 40, "squeezenet0_relu23_fwd": 40, "squeezenet0_conv24_fwd": 40, "squeezenet0_relu24_fwd": 40, "squeezenet0_concat7": 40, "squeezenet0_dropout0_fwd": 40, "squeezenet0_conv25_fwd": 40, "squeezenet0_relu25_fwd": 40, "squeezenet0_pool3_fwd": 40, "squeezenet0_flatten0_reshape0": 40, "select": [40, 50, 64], "pipe2": 40, "169000": 40, "proj2": 40, "498": [], "plot_gbegin_transfer_learn": [40, 58], "3480712e": [], "8039539e": [], "5612395e": [], "7818456e": [], 
"2171751e": [], "7962424e": [], "x56": 41, "impact": [41, 44, 62], "rename_result": 41, "proposed_nam": 41, "_sklearnlinearclassifi": 41, "_label": 41, "_probabl": 41, "_linearclassifi": 41, "_probability_tensor": 41, "_normal": 41, "060": [], "plot_gconvert": [41, 58], "get_attribute_valu": 42, "guess_tensor_typ": 42, "_apply_zipmap": 42, "_get_sklearn_operator_nam": 42, "convert_to_onnx_object": 42, "rate": [42, 64], "8212475": [], "6m": 42, "remain": [42, 56, 64], "115m": [], "6628956": [], "3m": [], "2m": [], "5719122": [], "59m": [], "0u": 42, "core": [42, 61], "0x7fcf248761d0": [], "itself": [42, 64], "skl2onnx_parser_castboost_classifi": 42, "get_opt": [42, 44, 49], "no_zipmap": 42, "bool": [42, 64], "label_vari": 42, "prob_dtyp": 42, "probability_tensor_vari": 42, "skl2onnx_convert_catboost": 42, "sparse_initi": 42, "treeensembl": [42, 44], "join": [42, 54], "att": [42, 52], "catboostcatboostclassifi": 42, "pipeline_catboost": 42, "75072795": [], "14217771": [], "10709434": [], "7507279515266418": [], "14217770099639893": [], "10709432512521744": [], "089": [], "plot_gexternal_catboost": [42, 58], "29389219": [], "2951622": [], "41094561": [], "29389214515686035": [], "29516223073005676": [], "4109455943107605": [], "487": [], "plot_gexternal_lightgbm": [43, 58], "summat": [44, 64], "t_i": 44, "sum_": 44, "grow": [44, 69], "ak": 44, "pv": 44, "calculate_linear_regressor_output_shap": [44, 45], "oml_vers": 44, "randint": 44, "reg": 44, "lgbmregressorlgbmregressor": 44, "skl2onnx_convert_lightgbm": 44, "releas": [44, 59, 69], "scenario": [44, 60, 61, 64], "model_onnx_split": 44, "sess_split": 44, "got_split": 44, "disp": 44, "disp_split": 44, "ratio": [44, 64], "00021804482619560382": [], "58190496289688e": [], "75882472380541": [], "were": [44, 51, 63, 64, 67], "disc": 44, "disc_split": 44, "7152381048823884e": [], "32478184819729e": [], "9660684979922296": [], "150": 44, "22529028200006": [], "0696370939999724": [], "against": [44, 64], "170": 44, "200": [44, 56, 64], "400": [44, 56], "baselin": 44, "96": 32, "352735e": [], "000002": 44, "363267e": [], "385508e": [], "623729e": [], "324782e": [], "120": 44, "140": [16, 44], "791191e": [], "114323e": [], "627890e": [], "017920e": [], "795015e": [], "332793e": [], "455": [], "plot_gexternal_lightgbm_reg": [44, 58], "make_classif": 45, "dmatrix": 45, "train_xgb": 45, "convert_xgboost_boost": 45, "xgb": 45, "69600695": 25, "1526681": 25, "15132491": 25, "6960069537162781": 25, "15266810357570648": 25, "15132491290569305": 25, "xgboostxgbregressor": 45, "66574": [], "462975": [], "035915": [], "136": [], "93835": [], "n_inform": 45, "dtrain": 45, "multi": [45, 64], "softmax": [45, 64], "num_class": [45, 64], "bst": 45, "cont": 45, "assertionerror": 45, "old": 45, "041": [], "plot_gexternal_xgboost": [45, 58], "decorrel": [46, 47, 48, 49, 50], "correl": [46, 47, 48, 49, 50, 52], "frac": [46, 48], "covari": [46, 48, 64], "pickl": [46, 48, 52], "guess_numpy_typ": [46, 49, 50, 52, 56], "onnxmatmul": [46, 49, 50, 52], "decorrelatetransform": [46, 47, 48, 49, 50], "gaussian": [46, 47, 48, 49, 50, 52], "avoid": [46, 47, 48, 49, 50, 52, 55, 63, 64], "non": [46, 47, 48, 49, 50, 52, 64], "squar": [46, 47, 48, 49, 50, 64], "root": [46, 47, 48, 49, 50, 52, 64], "coveri": [46, 47, 48, 49, 50], "linalg": [46, 47, 49, 50, 52], "eig": [46, 47, 49, 50], "linv": [46, 47, 49, 50, 52], "diag": [46, 47, 49, 50, 52], "test_decorrelate_transform": [46, 48], "dec": [46, 47, 48, 49, 50, 52], "cov": [46, 48, 52], "st": [46, 48, 52], "dec2": [46, 48, 52], 
"getvalu": [46, 48, 52], "0167562": [46, 47, 49, 50], "52111756": [46, 47, 49, 50], "24946737": [46, 47, 49, 50], "56194325": [46, 47, 49, 50], "0727878": [46, 47, 49, 50], "80853732": [46, 47, 49, 50], "43841018": [46, 47, 49, 50], "37441392": [46, 47, 49, 50], "69971891": [46, 47, 49, 50], "09950908": [46, 47, 49, 50], "2138161": [46, 47, 49, 50], "3499275": [46, 47, 49, 50], "13063404": [46, 47, 49, 50], "13540568": [46, 47, 49, 50], "79087008": [46, 47, 49, 50], "73938966": [46, 47, 49, 50], "35790036": [46, 47, 49, 50], "91900236": [46, 47, 49, 50], "04034399": [46, 47, 49, 50], "6509266": [46, 47, 49, 50], "84333333": 46, "05733333": 46, "758": 46, "19933333": 46, "8040383": 46, "94252732": 46, "22382017": 46, "36769632": 46, "03632069": 46, "86741369": 46, "52213719": 46, "93652687": 46, "02453122": 46, "83455725": 46, "decorrelate_transformer_shape_calcul": [46, 47, 48, 49, 50], "thing": [46, 48, 64], "pai": [46, 48], "attent": [46, 48], "decorrelate_transformer_convert": [46, 47, 48, 49, 50], "strict": [46, 47, 56, 64], "sklearndecorrelatetransform": [46, 47, 48, 49, 50], "04657619085458e": [46, 47, 49, 50], "0002951417065406967": [46, 47, 49, 50], "609": [], "plot_icustom_convert": [46, 58], "k_mean": 47, "ada_boost": 47, "ref": 47, "guess_proto_typ": [47, 52], "apply_sub": 47, "proto_dtyp": [47, 52], "mean_nam": 47, "coef_nam": 47, "coef": [47, 52], "op_nam": [47, 52], "sub_nam": 47, "accross": [47, 63], "operator_nam": [47, 52], "matmul": [47, 49, 64], "plot_jcustom_syntax": [47, 58], "patchwork": 48, "pca_": 48, "68412563e": 48, "19397247e": 48, "79148276e": 48, "26243707e": 48, "71414169e": 48, "77001225e": 48, "10464272e": 48, "90265503e": 48, "88899057e": 48, "44949426e": 48, "79002563e": 48, "99683897e": 48, "74534286e": 48, "18298979e": 48, "15593736e": 48, "55758166e": 48, "72871654e": 48, "26754513e": 48, "00792406e": 48, "12585926e": 48, "560125949597648e": 48, "0003158352661960492": 48, "924": [], "plot_kcustom_converter_wrapp": [48, 58], "use_gemm": 49, "onnxgemm": [49, 50], "beta": [49, 50, 64], "01757041717876e": [49, 50], "0005483764980468156": [49, 50], "000081": [], "000033": [], "000052": [], "000055": [], "000175": [], "000442": [], "plot_lcustom_opt": [49, 58], "sake": [50, 64], "y1": [50, 64], "y2": [50, 64], "decorrelate_transformer_pars": 50, "cls_type": [50, 56], "val_y1": [50, 56], "nogemm": 50, "val_y2": [50, 56], "ge_gemmcst1": 50, "ma_matmulcst": 50, "su_subcst": 50, "su_c0": 50, "n0_sub": 50, "n1_matmul": 50, "n2_gemm": 50, "853": [], "plot_mcustom_pars": [50, 58], "13733": 51, "bi": 51, "gram": [51, 64], "unigram": 51, "corpu": 51, "pattern": [51, 62], "mod1": 51, "ngram_rang": [51, 64], "token_pattern": [51, 64], "tfidfvectorizertfidfvector": 51, "place": [51, 64], "vocabulary_": 51, "ambigu": [51, 52, 55, 62, 64], "occur": [51, 64], "concaten": [51, 64], "mod2": 51, "037": [], "plot_ngram": [51, 58], "written": [52, 53], "eigen": 52, "anyth": 52, "ops_cpu": 52, "opruncustom": 52, "register_oper": 52, "onnxgatherel": 52, "onnxeyelik": 52, "onnxpow": 52, "onnxreducemean_13": 52, "onnxshap": 52, "onnxoper": 52, "livedecorrelatetransform": 52, "nf_": 52, "x2": [52, 64], "test_live_decorrelate_transform": 52, "everyth": 52, "algorithm": [52, 63, 64], "exercis": 52, "group": [52, 64], "offici": [52, 69], "onnxcustom": 52, "onnxeig": 52, "since_vers": 52, "expected_input": 52, "expected_output": 52, "eigenvalu": 52, "eigenvector": 52, "input_rang": 52, "output_rang": 52, "is_deprec": 52, "deprec": [52, 55, 64], "past_vers": 52, "eigv": 52, 
"live_decorrelate_transformer_shape_calcul": 52, "live_decorrelate_transformer_convert": 52, "trick": 52, "ouput": 52, "unless": [52, 55, 61, 64], "set_onnx_name_prefix": 52, "nf": 52, "v1": 52, "v2": [52, 59], "lp": [52, 64], "notat": [52, 64], "trv": 52, "coef_left": 52, "stage": [52, 64], "known": [52, 61, 63, 64], "abl": [52, 63], "alloc": [52, 64], "opeig": 52, "onnx_nod": 52, "desc": 52, "constructor": 52, "expected_attribut": 52, "eigval": 52, "sklearnlivedecorrelatetransform": 52, "320": [], "plot_pextend_python_runtim": [52, 58], "ambiti": 53, "000": [27, 53, 58], "plot_qextend_onnxruntim": [53, 58], "larger": [54, 64], "epgk": [54, 55, 56], "print_sparse_matrix": 54, "nonan": 54, "nan_to_num": 54, "ma": 54, "str_": 54, "dens": [54, 64], "chr": 54, "ord": 54, "sentenc": 54, "charact": [54, 62, 64], "classnam": 54, "var2": 54, "mail": 54, "anywher": 54, "pwd": 54, "c79857654": 54, "76543u3456": 54, "hhh": 54, "amp": 54, "05t11": 54, "dd76543u3456": 54, "ddhhh": 54, "u69": 54, "pre": 54, "leav": [54, 64, 69], "tfidf_step": 54, "transformers_": [54, 63], "get_feature_names_out": 54, "rr": [54, 64], "jj": 54, "jh": 54, "jjj": 54, "hjjj": 54, "ik": 54, "25823e": 54, "022": [], "plot_transformer_discrep": [54, 58], "stai": 55, "until": [55, 62, 64], "present": [55, 64], "configur": 55, "xgbclassifi": 55, "replacetransform": [55, 64], "cst": 55, "artif": 55, "sparse_threshold": [55, 64], "df_train": 55, "replace_nan": 55, "insert_replac": 55, "use_label_encod": 55, "scale1": 55, "count": [55, 64], "repl": 55, "typeerror": 55, "__name__": [55, 60], "catch_warn": 55, "record": 55, "simplefilt": 55, "futurewarn": 55, "c0": 55, "data_spars": 55, "89it": [], "lib": 55, "python3": 55, "site": [55, 61], "1395": 55, "88it": [], "78it": [], "774363": [], "119938": [], "000007": [], "hurt": 55, "data_dens": 55, "34it": [], "49it": 55, "60it": [], "44it": [], "04it": [], "9006811702978088": 55, "019004351971607": 55, "4323732931220851": 55, "9016947018779491": 55, "1430169111851105": 55, "13197947932162468": 55, "90068117": 55, "01900435": 55, "43237329": 55, "9016947": 55, "14301691": 55, "13197948": 55, "oppos": 55, "503027": [], "19it": 3, "58it": [], "262689": [], "tune": 55, "186": [], "plot_usparse_xgboost": [55, 58], "685": [4, 27, 56], "onnxless": 56, "onnxconcat": 56, "onnxclip": 56, "data1": 56, "df1": 56, "dumdf1": 56, "get_dummi": 56, "partial_fit": 56, "sc_data": 56, "bootstrap": [56, 64], "contamin": [56, 64], "randomst": 56, "n_job": [56, 63, 64], "feature_names2": 56, "threadingbackend": 56, "concurr": 56, "worker": 56, "elaps": 56, "finish": 56, "pyod_iforest_pars": 56, "pyod_iforest_shape_calcul": 56, "pyod_iforest_convert": 56, "detector": 56, "detector_": 56, "lab_pr": 56, "threshold_": 56, "train_scor": 56, "decision_scores_": 56, "scores_": 56, "min_": 56, "scale_": 56, "scaled_cent": 56, "clip": [56, 64], "clipped_": 56, "scores_2d": 56, "pyodiforest": 56, "75171798": 56, "95064645": 56, "expected_label": 56, "expected_proba": 56, "onx_label": 56, "onx_proba": 56, "diff_label": 56, "diff_proba": 56, "dicrep": 56, "684300415451318e": 56, "809063": 56, "19093698": 56, "41380423": 56, "58619577": 56, "61369824": 56, "38630173": 56, "459": [], "plot_wext_pyod_forest": [56, 58], "evid": 57, "ten": 57, "leftright": 57, "107": [16, 29, 57], "woe1": 57, "prd": 57, "woe2": 57, "omit": [57, 64], "2674": 57, "3321": 57, "2743": 57, "5696": 57, "extrem": [57, 64], "inf": [57, 64], "instruct": 57, "onxinf": 57, "plot_woe_transform": [57, 58], "auto_tutori": 58, "toolkit": 59, "introduct": 
59, "summari": [59, 64], "question": 59, "submit": 59, "__max_supported_opset__": 59, "libsvm": 59, "torch": [59, 64], "mxnet": 59, "credit": 59, "engin": 59, "scientist": 59, "winter": 59, "zeeshan": 59, "ahm": 59, "wei": 59, "sheng": 59, "chin": 59, "aidan": 59, "crook": 59, "xavier": 59, "dupr\u00e9": 59, "costin": 59, "eseanu": 59, "tom": 59, "finlei": 59, "lixin": 59, "gong": 59, "scott": 59, "ingli": 59, "pei": 59, "jiang": 59, "ivan": 59, "matantsev": 59, "prabhat": 59, "roi": [59, 64], "siddiqui": 59, "shouheng": 59, "yi": 59, "shauheen": 59, "zahirazami": 59, "yiwen": 59, "zhu": 59, "du": 59, "li": 59, "xuan": 59, "wenb": 59, "licens": 59, "apach": 59, "advanc": [60, 64], "ver": 60, "1887": 60, "3776": 60, "high": [61, 64], "perform": [61, 64], "rather": [61, 64], "applic": [61, 64], "servic": 61, "At": [61, 64], "briefli": 61, "ll": 61, "famou": 61, "commonli": 61, "undefin": [61, 64], "discuss": 61, "develop": [61, 67], "proper": 61, "compur": 62, "onnx_cdist": 62, "scan": [62, 64], "2442": 62, "text_vectoris": 62, "convert_sklearn_text_vector": 62, "re2": 62, "pyre2": 62, "za": 62, "z0": 62, "9_": 62, "sentanc": 62, "analys": 62, "char": 62, "char_wb": 62, "regex": 62, "2149": 62, "pr": [62, 68], "327": 62, "308": 62, "referenc": [62, 64], "Or": [62, 64], "classifier__zipmap": 62, "structur": [62, 63], "effect": [62, 64], "vb": 63, "vc": 63, "vcat": 63, "vcat2": 63, "tol": [63, 64], "tsvd": 63, "arpack": 63, "precprocessor": 63, "repres": [63, 64], "numfeat": 63, "strfeat": 63, "tri": 63, "access": [63, 64], "tp": 63, "receiv": 63, "topologi": 63, "consecut": [63, 64], "four": [63, 64], "complet": 63, "stori": 63, "runnabl": 63, "isol": 63, "independ": [63, 64], "unit": [63, 64], "test_investig": 63, "scaler1": 63, "scaler2": 63, "loop": [63, 64], "missingshapecalcul": 63, "guess_data_typ": 63, "enumerate_pipeline_model": 63, "_alter_model_for_debug": 63, "myscal": 63, "scaler3": 63, "recurs": 63, "all_model": 63, "data_in": 63, "ardregress": 64, "ye": 64, "adaboostregressor": 64, "additivechi2sampl": 64, "kernel_approxim": 64, "affinitypropag": 64, "agglomerativeclust": 64, "baggingregressor": 64, "basedecisiontre": 64, "baseensembl": 64, "naive_bay": 64, "bernoullirbm": 64, "neural_network": 64, "binar": 64, "birch": 64, "bisectingkmean": 64, "cca": 64, "cross_decomposit": 64, "calibr": 64, "classifierchain": 64, "dbscan": 64, "dictionarylearn": 64, "elasticnetcv": 64, "ellipticenvelop": 64, "empiricalcovari": 64, "factoranalysi": 64, "fastica": 64, "featureagglomer": 64, "featurehash": 64, "functiontransform": 64, "gammaregressor": 64, "gaussianrandomproject": 64, "random_project": 64, "genericunivariateselect": 64, "feature_select": 64, "graphicallasso": 64, "graphicallassocv": 64, "gridsearchcv": 64, "huberregressor": 64, "incrementalpca": 64, "isotonicregress": 64, "isoton": 64, "kbinsdiscret": 64, "kernelcenter": 64, "kerneldens": 64, "kernelridg": 64, "kernel_ridg": 64, "labelbinar": 64, "labelpropag": 64, "semi_supervis": 64, "labelspread": 64, "lar": 64, "larscv": 64, "lasso": 64, "lassocv": 64, "lassolar": 64, "lassolarscv": 64, "lassolars": 64, "latentdirichletalloc": 64, "ledoitwolf": 64, "lineardiscriminantanalysi": 64, "discriminant_analysi": 64, "svm": 64, "linearsvr": 64, "logisticregressioncv": 64, "meanshift": 64, "mincovdet": 64, "minibatchdictionarylearn": 64, "minibatchnmf": 64, "minibatchsparsepca": 64, "missingind": 64, "multilabelbinar": 64, "multioutputregressor": 64, "multitaskelasticnet": 64, "multitaskelasticnetcv": 64, "multitasklasso": 64, 
"multitasklassocv": 64, "nearestcentroid": 64, "neighborhoodcomponentsanalysi": 64, "nusvc": 64, "nusvr": 64, "nystroem": 64, "oa": 64, "optic": 64, "oneclasssvm": 64, "multiclass": 64, "ordinalencod": 64, "orthogonalmatchingpursuit": 64, "orthogonalmatchingpursuitcv": 64, "outputcodeclassifi": 64, "plscanon": 64, "plsregress": 64, "plssvd": 64, "passiveaggressiveclassifi": 64, "passiveaggressiveregressor": 64, "perceptron": 64, "poissonregressor": 64, "polynomialcountsketch": 64, "polynomialfeatur": 64, "powertransform": 64, "quantileregressor": 64, "quantiletransform": 64, "ransacregressor": 64, "rbfsampler": 64, "rfe": 64, "rfecv": 64, "radiusneighborstransform": 64, "randomtreesembed": 64, "randomizedsearchcv": 64, "regressorchain": 64, "ridg": 64, "ridgecv": 64, "ridgeclassifi": 64, "ridgeclassifiercv": 64, "sgdoneclasssvm": 64, "sgdregressor": 64, "svr": 64, "selectfdr": 64, "selectfpr": 64, "selectfrommodel": 64, "selectfw": 64, "selectkbest": 64, "selectpercentil": 64, "selftrainingclassifi": 64, "sequentialfeatureselector": 64, "shrunkcovari": 64, "skewedchi2sampl": 64, "sparsecod": 64, "sparsepca": 64, "sparserandomproject": 64, "spectralbiclust": 64, "spectralclust": 64, "spectralcoclust": 64, "splinetransform": 64, "stackingregressor": 64, "theilsenregressor": 64, "transformedtargetregressor": 64, "tweedieregressor": 64, "variancethreshold": 64, "189": 64, "sklearn_op": 64, "castregressor": 64, "from_valu": 64, "to_valu": 64, "n_iter": 64, "001": 64, "alpha_1": 64, "alpha_2": 64, "lambda_1": 64, "lambda_2": 64, "compute_scor": 64, "threshold_lambda": 64, "fit_intercept": 64, "copy_x": 64, "learning_r": 64, "samm": 64, "base_estim": 64, "linear": 64, "max_sampl": 64, "bootstrap_featur": 64, "oob_scor": 64, "warm_start": 64, "covariance_typ": 64, "reg_covar": 64, "init_param": 64, "weight_concentration_prior_typ": 64, "dirichlet_process": 64, "weight_concentration_prior": 64, "mean_precision_prior": 64, "mean_prior": 64, "degrees_of_freedom_prior": 64, "covariance_prior": 64, "verbose_interv": 64, "alpha_init": 64, "lambda_init": 64, "force_alpha": 64, "fit_prior": 64, "class_prior": 64, "sigmoid": 64, "cv": 64, "min_categori": 64, "verbose_feature_names_out": 64, "utf": 64, "decode_error": 64, "strip_acc": 64, "lowercas": 64, "stop_word": 64, "analyz": 64, "max_df": 64, "criterion": 64, "gini": 64, "splitter": 64, "min_samples_split": 64, "min_samples_leaf": 64, "min_weight_fraction_leaf": 64, "max_leaf_nod": 64, "min_impurity_decreas": 64, "ccp_alpha": 64, "squared_error": 64, "precomput": 64, "0001": 64, "posit": 64, "cyclic": 64, "ep": 64, "n_alpha": 64, "auto": 64, "alternate_sign": 64, "transformer_list": 64, "featureunion": 64, "func": 64, "inverse_func": 64, "accept_spars": 64, "check_invers": 64, "feature_names_out": 64, "kw_arg": 64, "inv_kw_arg": 64, "weights_init": 64, "means_init": 64, "precisions_init": 64, "prior": 64, "var_smooth": 64, "fmin_l_bfgs_b": 64, "n_restarts_optim": 64, "max_iter_predict": 64, "copy_x_train": 64, "one_vs_rest": 64, "normalize_i": 64, "compute_inverse_compon": 64, "score_func": 64, "f_classif": 64, "percentil": 64, "log_loss": 64, "subsampl": 64, "friedman_ms": 64, "validation_fract": 64, "n_iter_no_chang": 64, "param_grid": 64, "refit": 64, "pre_dispatch": 64, "error_scor": 64, "return_train_scor": 64, "l2_regular": 64, "max_bin": 64, "monotonic_cst": 64, "interaction_cst": 64, "early_stop": 64, "quantil": 64, "epsilon": 64, "whiten": 64, "n_bin": 64, "lloyd": 64, "missing_valu": 64, "n_neighbor": 64, "uniform": 64, "nan_euclidean": 64, 
"add_ind": 64, "keep_empty_featur": 64, "leaf_siz": 64, "minkowski": 64, "metric_param": 64, "gamma": 64, "degre": 64, "coef0": 64, "kernel_param": 64, "fit_inverse_transform": 64, "eigen_solv": 64, "iterated_pow": 64, "remove_zero_eig": 64, "neg_label": 64, "pos_label": 64, "sparse_output": 64, "n_nonzero_coef": 64, "220446049250313e": 64, "fit_path": 64, "jitter": 64, "max_n_alpha": 64, "aic": 64, "noise_vari": 64, "svd": 64, "shrinkag": 64, "store_covari": 64, "covariance_estim": 64, "l2": 64, "squared_hing": 64, "dual": 64, "ovr": 64, "intercept_sc": 64, "epsilon_insensit": 64, "novelti": 64, "hidden_layer_s": 64, "activ": 64, "relu": 64, "adam": 64, "learning_rate_init": 64, "power_t": 64, "momentum": 64, "nesterovs_momentum": 64, "beta_1": 64, "beta_2": 64, "999": 64, "max_fun": 64, "15000": 64, "feature_rang": 64, "1024": 64, "compute_label": 64, "max_no_improv": 64, "init_s": 64, "reassignment_ratio": 64, "radiu": 64, "callback": 64, "nu": 64, "shrink": 64, "cache_s": 64, "decision_function_shap": 64, "min_frequ": 64, "max_categori": 64, "unknown_valu": 64, "encoded_missing_valu": 64, "svd_solver": 64, "n_oversampl": 64, "power_iteration_norm": 64, "hing": 64, "eta0": 64, "interaction_onli": 64, "include_bia": 64, "yeo": 64, "johnson": 64, "reg_param": 64, "solver_opt": 64, "min_sampl": 64, "residual_threshold": 64, "is_data_valid": 64, "is_model_valid": 64, "max_trial": 64, "max_skip": 64, "stop_n_inli": 64, "stop_scor": 64, "stop_prob": 64, "absolute_error": 64, "n_features_to_select": 64, "importance_gett": 64, "min_features_to_select": 64, "outlier_label": 64, "gcv_mode": 64, "store_cv_valu": 64, "alpha_per_target": 64, "with_cent": 64, "with_scal": 64, "quantile_rang": 64, "unit_vari": 64, "invscal": 64, "prefit": 64, "norm_ord": 64, "final_estim": 64, "stack_method": 64, "with_mean": 64, "with_std": 64, "use_idf": 64, "smooth_idf": 64, "sublinear_tf": 64, "max_subpopul": 64, "n_subsampl": 64, "power": 64, "hard": 64, "flatten_transform": 64, "onnx_convert": 64, "fetch": [64, 68], "onnx_pars": 64, "_to_onnx": 64, "arg": 64, "sinc": 64, "elementwis": 64, "constraint": 64, "uint8": 64, "uint16": 64, "uint32": 64, "uint64": 64, "int8": 64, "int16": 64, "int32": 64, "float16": 64, "bfloat16": 64, "constrain": 64, "aco": 64, "arccosin": 64, "cosin": 64, "wise": 64, "acosh": 64, "hyperbol": 64, "adagrad": 64, "preview": 64, "stochast": 64, "conduct": 64, "behavior": 64, "norm_coeffici": 64, "decai": 64, "decay_factor": 64, "divid": 64, "direct": 64, "accumul": 64, "sequenti": 64, "x_new": 64, "h_new": 64, "pseudo": 64, "arithmet": 64, "broadcast": 64, "scalar": 64, "_2": 64, "g_regular": 64, "adapt": 64, "coordin": 64, "h_adapt": 64, "x_1": 64, "jointli": 64, "view": 64, "cours": 64, "entir": 64, "jmlr": 64, "paper": 64, "volume12": 64, "duchi11a": 64, "pdf": 64, "special": 64, "composit": 64, "999999974752427e": 64, "2147483647": 64, "t1": 64, "t2": 64, "variad": 64, "t3": 64, "bit": 64, "exponenti": 64, "histor": 64, "moreov": 64, "v_new": 64, "h_sqrt": 64, "th": 64, "r_adjust": 64, "x_final": 64, "norm_coefficient_post": 64, "previous": 64, "8999999761581421": 64, "9990000128746033": 64, "aka": 64, "multidirect": 64, "operand": 64, "hand": 64, "side": 64, "match": 64, "contigu": 64, "mutual": 64, "suffix": 64, "expans": 64, "logic": [64, 68], "boolean": 64, "argmax": 64, "prune": 64, "select_last_index": 64, "occurr": 64, "asin": 64, "arcsin": 64, "sine": 64, "asinh": 64, "atan": 64, "arctang": 64, "tangent": 64, "atanh": 64, "averagepool": 64, "consum": 64, "across": 64, 
"stride": 64, "length": 64, "downsampl": 64, "further": 64, "output_spatial_shap": 64, "floor": 64, "input_spatial_shap": 64, "pad_shap": 64, "kernel_spatial_shap": 64, "strides_spatial_shap": 64, "ceil": 64, "ceil_mod": 64, "auto_pad": 64, "same_upp": 64, "same_low": 64, "exclud": 64, "count_include_pad": 64, "notset": 64, "output_shap": 64, "input_shap": 64, "odd": 64, "pixel": 64, "edg": 64, "channel": 64, "height": 64, "width": 64, "d1": 64, "dn": 64, "arriv": 64, "data_batch": 64, "data_channel": 64, "data_featur": 64, "vari": 64, "variou": 64, "dilat": 64, "batchnorm": 64, "carri": 64, "arxiv": 64, "1502": 64, "03167": 64, "five": 64, "input_mean": 64, "input_var": 64, "statist": 64, "training_mod": 64, "running_mean": 64, "running_var": 64, "current_mean": 64, "current_var": 64, "reducemean": 64, "all_except_channel_index": 64, "reducevar": 64, "notic": 64, "varianc": 64, "sqrd": 64, "x_i": 64, "x_avg": 64, "formula": 64, "overflow": 64, "depreci": 64, "implementor": 64, "trail": 64, "999999747378752e": 64, "bia": 64, "saved_mean": 64, "saved_var": 64, "5f": 64, "is_test": 64, "nonzero": 64, "9f": 64, "dimension": 64, "nchw": 64, "dure": 64, "mini": 64, "bernoulli": 64, "distribut": 64, "determinist": 64, "outcom": [64, 68], "bitshift": 64, "bitwis": 64, "shift": 64, "toward": 64, "amount": 64, "bitwiseand": 64, "bitwisenot": 64, "bitwiseor": 64, "bitwisexor": 64, "xor": 64, "blackmanwindow": 64, "blackman": 64, "ieeexplor": 64, "ieee": 64, "1455106": 64, "output_datatyp": 64, "strictli": 64, "datatyp": 64, "enum": 64, "period": 64, "symmetr": 64, "hann": 64, "int64_t": 64, "plain": 64, "scientif": 64, "1e8": 64, "liter": 64, "insensit": 64, "similarli": 64, "15926": 64, "hello": 64, "world": 64, "718": 64, "awar": [64, 69], "caus": 64, "1415926459": 64, "141592": 64, "oor": 64, "uint": 64, "discard": 64, "reinterpret": 64, "complement": 64, "sign": 64, "castlik": 64, "target_typ": 64, "castmap": 64, "ascend": 64, "pack": 64, "exce": 64, "max_map": 64, "cast_to": 64, "desir": 64, "to_float": 64, "to_str": 64, "to_int64": 64, "map_form": 64, "br": 64, "categorymapp": 64, "vice": 64, "versa": 64, "default_int64": 64, "default_": 64, "default_str": 64, "_unus": 64, "Its": 64, "celu": 64, "differenti": 64, "control": 64, "centercroppad": 64, "crop": 64, "input_data": 64, "tind": 64, "output_data": 64, "complex64": 64, "complex128": 64, "numeric_limit": 64, "lowest": 64, "minimum": 64, "under": 64, "4028234663852886e": 64, "col2im": 64, "rearrang": 64, "block": 64, "multidimension": 64, "fold": 64, "nn": 64, "dangel": 64, "unfoldnd": 64, "although": 64, "image_shap": 64, "redund": 64, "convolut": 64, "aten": 64, "src": 64, "nativ": 64, "cpp": 64, "l10": 64, "ari": 64, "block_shap": 64, "lexicograph": 64, "h_img": 64, "w_img": 64, "dim_i1": 64, "dim_i2": 64, "dim_in": 64, "h_block": 64, "w_block": 64, "dim_b1": 64, "dim_b2": 64, "dim_bn": 64, "compress": 64, "slice": 64, "condit": 64, "evalu": 64, "exceed": 64, "alon": 64, "concatfromsequ": 64, "new_axi": 64, "stack": 64, "input_sequ": 64, "seq": 64, "sparse_valu": 64, "value_": 64, "constantofshap": 64, "1d": 64, "filter": 64, "2d": 64, "kh": 64, "kw": 64, "k1": 64, "k2": 64, "kn": 64, "filter_out_channel": 64, "filter_in_channel": 64, "filter_spati": 64, "multipli": 64, "convinteg": 64, "x_zero_point": 64, "quantiz": 64, "w_zero_point": 64, "convtranspos": 64, "via": 64, "equat": 64, "input_s": 64, "output_pad": 64, "kernel_shap": 64, "start_i": 64, "end_i": 64, "explicitli": 64, "total_pad": 64, "co": 64, "cosh": 64, "cumsum": 64, 
"cumul": 64, "inclus": 64, "exclus": 64, "opposit": 64, "input_x": 64, "dft": 64, "discret": 64, "fourier": 64, "onesid": 64, "n_fft": 64, "satisfi": 64, "conjug": 64, "symmetri": 64, "rfft": 64, "batch_idx": 64, "signal_dim1": 64, "signal_dim2": 64, "signal_dimn": 64, "diment": 64, "signal": 64, "imaginari": 64, "dft_length": 64, "signal_dim": 64, "depthtospac": 64, "permut": 64, "depth": 64, "spacetodepth": 64, "dcr": 64, "tmp": 64, "blocksiz": 64, "crd": 64, "arrang": 64, "dequantizelinear": 64, "dequant": 64, "x_scale": 64, "suppos": 64, "de": 64, "det": 64, "inner": 64, "submatric": 64, "string_vocabulari": 64, "dropout": 64, "mask": 64, "henc": 64, "mimic": 64, "dynamicquantizelinear": 64, "fuse": 64, "fp32": 64, "8bit": 64, "zeropoint": 64, "y_scale": 64, "qmax": 64, "qmin": 64, "intermediate_zero_point": 64, "y_zero_point": 64, "satur": 64, "itermediate_zero_point": 64, "127": 64, "ti": 64, "unsign": 64, "einsum": 64, "term1": 64, "term2": 64, "input1": 64, "input2": 64, "einstein": 64, "comma": 64, "letter": 64, "classic": 64, "implicitli": 64, "alphabet": 64, "ellipsi": 64, "0020": 64, "elu": 64, "integr": 64, "erf": 64, "expand": 64, "align": 64, "broadcast_to": 64, "ndim": 64, "eyelik": 64, "diagon": 64, "featurevector": 64, "treat": 64, "maintain": [64, 67], "d_0": 64, "d_1": 64, "d_n": 64, "d_": 64, "d_axi": 64, "outer": 64, "gru": 64, "cudnn": 64, "gate": 64, "reset": 64, "hidden": 64, "zrh": 64, "recurr": 64, "backward": 64, "wbb": 64, "rbb": 64, "state": 64, "num_direct": 64, "bidirect": 64, "tanh": 64, "2x": 64, "affin": 64, "leakyrelu": 64, "thresholdedrelu": 64, "scaledtanh": 64, "hardsigmoid": 64, "softsign": 64, "softplu": 64, "zt": 64, "wz": 64, "ht": 64, "rz": 64, "wbz": 64, "rbz": 64, "wr": 64, "wbr": 64, "rbr": 64, "rh": 64, "rbh": 64, "wbh": 64, "linear_before_reset": 64, "rnn": 64, "forward": 64, "layout": 64, "initial_h": 64, "y_h": 64, "seq_length": 64, "hidden_s": 64, "potenti": 64, "sequence_len": 64, "seq_len": 64, "foward": 64, "output_sequ": 64, "i_": 64, "j_": 64, "gatherel": 64, "identifi": 64, "scatterel": 64, "gathernd": 64, "batch_dim": 64, "indices_shap": 64, "thought": 64, "lead": 64, "salient": 64, "honor": 64, "met": 64, "data_shap": 64, "obtain": 64, "think": 64, "indices_slic": 64, "locat": 64, "scatternd": 64, "en": 64, "wiki": 64, "basic_linear_algebra_subprogram": 64, "level_3": 64, "transa": 64, "transb": 64, "unidirect": 64, "globalaveragepool": 64, "globallppool": 64, "lppool": 64, "globalmaxpool": 64, "maxpool": 64, "partial": 64, "deriv": 64, "wide": 64, "trainabl": 64, "simplic": 64, "dw": 64, "dz": 64, "1st": 64, "2nd": 64, "3rd": 64, "concept": 64, "lstm": 64, "dh": 64, "2o": 64, "h_1": 64, "z_1": 64, "y_1": 64, "symbol": 64, "substitut": 64, "greaterorequ": 64, "greater_equ": 64, "gridsampl": 64, "flow": 64, "grid": 64, "h_out": 64, "w_out": 64, "interpol": 64, "fall": 64, "outsid": 64, "sampler": 64, "1506": 64, "02025": 64, "grid_sampl": 64, "align_corn": 64, "extrema": 64, "corner": 64, "resolut": 64, "agnost": 64, "bilinear": 64, "bicub": 64, "padding_mod": 64, "border": 64, "far": 64, "awai": 64, "groupnorm": 64, "1803": 64, "08494": 64, "num_group": 64, "instancenorm": 64, "layernorm": 64, "hammingwindow": 64, "ham": 64, "hannwindow": 64, "hardswish": 64, "hardmax": 64, "input_feature_dimens": 64, "coerc": 64, "arbitrari": 64, "a_0": 64, "a_1": 64, "a_": 64, "a_k": 64, "throw": 64, "0th": 64, "nxd": 64, "coercion": 64, "cond": 64, "live": 64, "enclos": 64, "then_branch": 64, "else_branch": 64, "branch": 64, "static": 64, "s1": 
64, "s2": 64, "compat": 64, "dim_param": 64, "contrast": 64, "canon": 64, "imputed_value_int64": 64, "replaced_value_int64": 64, "imputed_valu": 64, "1607": 64, "08022": 64, "isinf": 64, "detect_neg": 64, "induc": 64, "detect_posit": 64, "lrn": 64, "respons": 64, "alexnet": 64, "nip": 64, "cc": 64, "4824": 64, "imagenet": 64, "dk": 64, "square_sum": 64, "expon": 64, "forget": 64, "iofc": 64, "iof": 64, "peephol": 64, "pb": 64, "wi": 64, "ri": 64, "ct": 64, "wbi": 64, "rbi": 64, "ft": 64, "wf": 64, "pf": 64, "wbf": 64, "wc": 64, "rc": 64, "wbc": 64, "rbc": 64, "ot": 64, "wo": 64, "ro": 64, "po": 64, "wbo": 64, "rbo": 64, "input_forget": 64, "initial_c": 64, "y_c": 64, "hidde_s": 64, "keys_": 64, "values_": 64, "impli": 64, "keys_str": 64, "ami": 64, "salli": 64, "values_int64": 64, "dori": 64, "default_float": 64, "classes_str": 64, "overal": 64, "normalized_ax": 64, "dd": 64, "varep": 64, "stddev": 64, "invstddev": 64, "reciproc": 64, "stand": 64, "stash_typ": 64, "normalizedsc": 64, "leakag": 64, "009999999776482582": 64, "lessorequ": 64, "less_equ": 64, "multinomi": 64, "softmax_zero": 64, "probit": 64, "latter": 64, "linearregressor": 64, "univari": 64, "natur": 64, "logsoftmax": 64, "termin": 64, "trip": 64, "yield": 64, "tabl": 64, "summar": 64, "max_trip_count": 64, "condition_var": 64, "analog": 64, "trip_count": 64, "usag": 64, "net": 64, "keepgo": 64, "keepgoing_out": 64, "b_out": 64, "user_defined_v": 64, "keepgoing_in": 64, "incom": 64, "b_in": 64, "my_loc": 64, "outgo": 64, "const": 64, "bind": 64, "formal": 64, "subsequ": 64, "pair": 64, "eg": 64, "suppli": 64, "scan_output": 64, "semant": 64, "wavefront": 64, "devblog": 64, "nvidia": 64, "frontend": 64, "emit": 64, "success": 64, "possibli": 64, "residu": 64, "v_initi": 64, "v_final_and_scan_output": 64, "123": 64, "wish": 64, "lpnormal": 64, "kernelspatialshap": 64, "intend": 64, "legaci": 64, "author": 64, "encourag": 64, "matmulinteg": 64, "a_zero_point": 64, "wherea": 64, "zp_1": 64, "zp_2": 64, "zp_m": 64, "b_zero_point": 64, "zp_n": 64, "data_0": 64, "storage_ord": 64, "storag": 64, "maxroipool": 64, "num_roi": 64, "pooled_shap": 64, "spatial_scal": 64, "batch_id": 64, "x1": 64, "maxunpool": 64, "essenti": 64, "unpool": 64, "corrspond": 64, "output_s": 64, "disambigu": 64, "invert": 64, "meanvariancenorm": 64, "ex": 64, "cacul": 64, "melweightmatrix": 64, "linearli": 64, "frequenc": 64, "spectra": 64, "stft": 64, "num_mel_bin": 64, "lower_edge_hertz": 64, "upper_edge_hertz": 64, "mel": 64, "hertz": 64, "2595": 64, "log10": 64, "700": 64, "triangl": 64, "filterbank": 64, "peak": 64, "spectrogram": 64, "frame": 64, "num_spectrogram_bin": 64, "spectrum": 64, "band": 64, "understood": 64, "nonredund": 64, "bin": 64, "sample_r": 64, "dictat": 64, "triangular": 64, "highest": 64, "mish": 64, "monoton": 64, "ln": 64, "modulu": 64, "divisor": 64, "fmod": 64, "dividend": 64, "forc": 64, "treatment": 64, "sg": 64, "nesterov": 64, "beta_adjust": 64, "sample_s": 64, "class_siz": 64, "unnorm": 64, "flip": 64, "negativeloglikelihoodloss": 64, "likelihood": 64, "d_2": 64, "d_k": 64, "ignore_index": 64, "reduct": 64, "nllloss": 64, "weight_tot": 64, "shall": 64, "rescal": 64, "nonmaxsuppress": 64, "box": 64, "intersect": 64, "iou": 64, "overlap": 64, "score_threshold": 64, "center_point_box": 64, "invari": 64, "orthogon": 64, "thu": 64, "selected_indic": 64, "ly": 64, "mostli": 64, "x_center": 64, "y_center": 64, "num_batch": 64, "spatial_dimens": 64, "max_output_boxes_per_class": 64, "iou_threshold": 64, "decid": 64, 
"num_selected_indic": 64, "batch_index": 64, "class_index": 64, "box_index": 64, "infix": 64, "negat": 64, "hot": 64, "on_valu": 64, "off_valu": 64, "innermost": 64, "cats_int64": 64, "lookup": 64, "certain": 64, "optionalgetel": 64, "optionalhasel": 64, "prelu": 64, "slope": 64, "sharedacross": 64, "constant_valu": 64, "around": 64, "toru": 64, "num_ax": 64, "x1_begin": 64, "x2_begin": 64, "x1_end": 64, "x2_end": 64, "xi_begin": 64, "xi_end": 64, "chosen": 64, "input_rank": 64, "qlinearconv": 64, "w_scale": 64, "t4": 64, "zero_point": 64, "qlinearmatmul": 64, "v_1": 64, "v_2": 64, "v_m": 64, "v_k": 64, "a_scal": 64, "b_scale": 64, "quantizelinear": 64, "low": 64, "wbbi": 64, "rbbi": 64, "randomnorm": 64, "drawn": 64, "randomnormallik": 64, "randomuniform": 64, "boundari": 64, "randomuniformlik": 64, "increment": 64, "number_of_el": 64, "pseudocod": 64, "reducel1": 64, "noop_with_empty_ax": 64, "act": 64, "reducel2": 64, "reducelogsum": 64, "reducelogsumexp": 64, "reducemin": 64, "reduceprod": 64, "rectifi": 64, "unchang": 64, "neighborhood": 64, "output_dimens": 64, "input_dimens": 64, "roi_end": 64, "roi_start": 64, "antialia": 64, "cubic": 64, "antialias": 64, "downscal": 64, "achiev": 64, "stretch": 64, "resampl": 64, "coordinate_transformation_mod": 64, "x_resiz": 64, "x_origin": 64, "length_origin": 64, "length_res": 64, "output_width": 64, "fraction": 64, "output_width_int": 64, "half_pixel": 64, "half_pixel_symmetr": 64, "input_width": 64, "x_ori": 64, "pytorch_half_pixel": 64, "asymmetr": 64, "tf_crop_and_res": 64, "start_x": 64, "end_x": 64, "cubic_coeff_a": 64, "1163711": 64, "exclude_outsid": 64, "renorm": 64, "extrapolation_valu": 64, "keep_aspect_ratio_polici": 64, "regard": 64, "aspect": 64, "disregard": 64, "out_siz": 64, "not_larg": 64, "extent": 64, "in_siz": 64, "round_int": 64, "not_smal": 64, "halfwai": 64, "nearest_mod": 64, "round_prefer_floor": 64, "half": 64, "down": 64, "round_prefer_ceil": 64, "start1": 64, "startn": 64, "end1": 64, "endn": 64, "upsampl": 64, "trilinear": 64, "etc": 64, "roi_x": 64, "tf_half_pixel_for_nn": 64, "reversesequ": 64, "beyond": 64, "time_axi": 64, "batch_axi": 64, "roialign": 64, "cnn": 64, "1703": 64, "06870": 64, "output_height": 64, "misalign": 64, "output_half_pixel": 64, "recommend": 64, "sampling_ratio": 64, "roi_width": 64, "likewis": 64, "batch_indic": 64, "signal_length": 64, "frame_step": 64, "slid": 64, "window_shap": 64, "frame_length": 64, "dft_unique_bin": 64, "svmclassifi": 64, "kernel_typ": 64, "poli": 64, "prob_a": 64, "prob_b": 64, "classlabels_": 64, "bactch": 64, "svmregressor": 64, "anomali": 64, "n_support": 64, "one_class": 64, "flag": 64, "scan_input": 64, "state_vari": 64, "scan_output_el": 64, "restrict": 64, "scan_input_direct": 64, "scan_output_direct": 64, "prepend": 64, "scan_input_ax": 64, "scan_output_ax": 64, "togeth": 64, "num_scan_input": 64, "axis_1": 64, "axis_m": 64, "init_1": 64, "init_n": 64, "scan_1": 64, "scan_m": 64, "scan_i": 64, "axis_i": 64, "scan_j": 64, "axis_j": 64, "sequence_length": 64, "st_1": 64, "st_n": 64, "scan_out_1": 64, "scan_out_k": 64, "si_1": 64, "si_m": 64, "so_1": 64, "so_k": 64, "h_0": 64, "scanloop": 64, "nest": 64, "h_tminus1": 64, "x_t": 64, "t5": 64, "initial_state_and_scan_input": 64, "final_state_and_scan_output": 64, "max_sequence_length": 64, "bst_1": 64, "bst_n": 64, "meaning": 64, "b_scan_out_1": 64, "b_scan_out_k": 64, "scatter": 64, "duplic": 64, "idx1": 64, "idx2": 64, "update_indic": 64, "idx": 64, "ndindex": 64, "counter": 64, "selu": 64, 
"67326319217681884765625": 64, "6732632423543772848170429916717": 64, "6732631921768188": 64, "05070102214813232421875": 64, "0507009873554804934193349852946": 64, "0507010221481323": 64, "6732": 64, "673200011253357": 64, "0507": 64, "0506999492645264": 64, "sequenceat": 64, "sequenceconstruct": 64, "sequenceempti": 64, "sequenceeras": 64, "eras": 64, "sequenceinsert": 64, "sequencelength": 64, "sequencemap": 64, "additional_input": 64, "out_sequ": 64, "upto": 64, "clamp": 64, "lambd": 64, "formul": 64, "sin": 64, "sinh": 64, "negatv": 64, "accommod": 64, "int_max": 64, "int_min": 64, "softmaxcrossentropyloss": 64, "cross": 64, "entropi": 64, "l_1": 64, "l_2": 64, "l_n": 64, "j_1": 64, "j_2": 64, "j_k": 64, "l_i": 64, "ground": 64, "truth": 64, "log_prob": 64, "num_output": 64, "evenli": 64, "splittabl": 64, "chunk": 64, "splittosequ": 64, "stringnorm": 64, "clean": 64, "examin": 64, "stopword": 64, "uppercas": 64, "case_change_act": 64, "is_case_sensit": 64, "identif": 64, "sensit": 64, "subtract": 64, "tan": 64, "ngram_index": 64, "pool_int64": 64, "ngram_count": 64, "pool_str": 64, "ngram": 64, "ether": 64, "tile": 64, "output_dim": 64, "topk": 64, "largest": 64, "smallest": 64, "a_2": 64, "a_n": 64, "tiebreak": 64, "nodes_x": 64, "class_": 64, "classlabels_str": 64, "_as_tensor": 64, "nodes_": 64, "decod": 64, "target_": 64, "branch_lt": 64, "branch_gt": 64, "branch_eq": 64, "branch_neq": 64, "aggregate_funct": 64, "aggreg": 64, "trilu": 64, "subtensor": 64, "inverse_indic": 64, "attribute_sort": 64, "attribute_axi": 64, "output_i": 64, "output_indic": 64, "output_inverse_indic": 64, "output_count": 64, "unsqueez": 64, "output_rank": 64, "plu": 64, "matter": 64, "bfloat": 64, "convet": 65, "mainten": 67, "cycl": 67, "mylogisticregress": 68, "sphinxext": 68, "sphinx_runpython_extens": 68, "run_python_script_140552214879104": 68, "737": [16, 68], "resolv": 70, "455253499780156e": 3, "708922725775654e": 3, "072999999678359e": 3, "390000001170847e": 3, "2687857000173608e": 3, "6693917723816003e": 3, "7610000008971836e": 3, "0040000006483751e": 3, "93it": 3, "000009": [3, 55], "467956": 3, "780008": 3, "000065": 3, "000024": 3, "775544": 3, "000591": 3, "000193": 3, "055958": 3, "989": [3, 27], "355312850000246": 4, "29348953099997743": 4, "6831115730001329": 4, "16402971700017588": 4, "4586870539997108": 4, "15432031699992876": 4, "35262849": 5, "77470989": 5, "85001598": 5, "50263433": 5, "44689391": 5, "3526287": 5, "7747092": 5, "8500156": 5, "5026336": 5, "4468932": 5, "4812870999999177": 5, "8075209660000837": 5, "877": [5, 27], "795": [6, 27], "79113857": 7, "20886143": 7, "90867425": 7, "09132575": 7, "7911385297775269": 7, "20886144042015076": 7, "9086743593215942": 7, "09132567048072815": 7, "7911385": 7, "20886144": 7, "90867436": 7, "09132567": 7, "573": [7, 27], "94895805e": 8, "10418332e": 8, "21479166e": 8, "9948958158493042": 8, "005104185082018375": 8, "2147937766826544e": 8, "029782379046082497": 8, "9085215926170349": 8, "061696045100688934": 8, "83118948": 8, "55861193": 8, "38980141": 8, "831189155578613": 8, "558612108230591": 8, "389801025390625": 8, "3820686340332031": 8, "0358331203460693": 8, "6537656784057617": 8, "042": [8, 27], "182": [9, 27], "632": [10, 27], "9724591970443726": 11, "027540581300854683": 11, "352435330976732e": 11, "9776268601417542": 11, "022372985258698463": 11, "9038986920349998e": 11, "7245920e": 11, "7540581e": 11, "3524353e": 11, "7762686e": 11, "2372985e": 11, "9038987e": 11, "9724592": 11, "97762686": 11, "02754058": 11, "02237299": 
11, "012372604999882242": 11, "004819102000055864": 11, "002799700999958077": 11, "005126302000007854": 11, "004604101999802879": 11, "0027419010000357957": 11, "0030426010000610404": 11, "0026854010000079143": 11, "0026163009999891074": 11, "0026090009998824826": 11, "0016827009999360598": 11, "0011974000001373497": 11, "0012046009999266971": 11, "0011644000001069799": 11, "0011607010001171147": 11, "0012080000001333246": 11, "001181700999950408": 11, "0011657999998533342": 11, "0013147009999556758": 11, "0015800999999555643": 11, "00204040099993108": 11, "0020714009999664995": 11, "0020063009999375936": 11, "001988400999834994": 11, "0019787999999607564": 11, "0019756009999127855": 11, "0021800010001697956": 11, "002363000999821452": 11, "0021929009999439586": 11, "002378501000066535": 11, "153": [11, 27], "1081381": 12, "09305604": 12, "05512788": 12, "40800577": 12, "419": [12, 27], "15388734e": 13, "84445827e": 13, "65438648e": 13, "62653409e": 13, "37139236e": 13, "07354284e": 13, "04537964e": 13, "53809626e": 13, "10733209e": 13, "98573169e": 13, "25151760e": 13, "24990923e": 13, "20811905e": 13, "87065374e": 13, "09726507e": 13, "1538868e": 13, "8444584e": 13, "6544168e": 13, "6265337e": 13, "3713925e": 13, "0727392e": 13, "0453798e": 13, "5380947e": 13, "1110506e": 13, "9857341e": 13, "2515183e": 13, "2499082e": 13, "2081197e": 13, "8706535e": 13, "0972649e": 13, "3160": 13, "57906777e": 14, "91959240e": 14, "07382854e": 14, "72054450e": 14, "27893759e": 14, "17916588e": 14, "53658204e": 14, "46046803e": 14, "94993135e": 14, "73275979e": 14, "98725606e": 14, "01001118e": 14, "70430915e": 14, "95535348e": 14, "55504186e": 14, "5787166e": 14, "9195931e": 14, "0738282e": 14, "7205452e": 14, "2789373e": 14, "1797400e": 14, "5365826e": 14, "4604677e": 14, "9501395e": 14, "7327205e": 14, "9872569e": 14, "0100102e": 14, "7043103e": 14, "9553531e": 14, "5552911e": 14, "812": [14, 27], "155": 16, "86157227": 16, "161": 16, "37359619": 16, "169": 16, "98284912": 16, "163": 16, "67077637": 16, "179": 16, "60345459": 16, "92480469": 16, "47344971": 16, "94818115": 16, "131": 16, "79040527": 16, "178": 16, "3425293": 16, "270336": 16, "270230": 16, "27374268": 16, "270231": 16, "13311768": 16, "22369385": 16, "270233": 16, "97131348": 16, "270236": 16, "3505249": 16, "346": 16, "86221577": 16, "0048411": 16, "00493731": 16, "00496565": 16, "00522617": 16, "00536676": 16, "37604364": 16, "97800802": 16, "66931185": 16, "59932504": 16, "589": 16, "99334399": 16, "502": [16, 56, 58], "71732585": 16, "60174561": 16, "01325219": 16, "00510439": 16, "00620633": 16, "0110021": 16, "00937738": 16, "717": 16, "44499291": 16, "726": 16, "51668442": 16, "729": 16, "78735343": 16, "89521503": 16, "840": 16, "48277943": 16, "592": [16, 27], "76688174": 17, "23311826": 17, "9036112427711487": 17, "09638875722885132": 17, "xcdlga": 17, "xde": 17, "xad": 17, "x88w": 17, "x9b": 17, "x98": 17, "xca": 17, "x97": 17, "19102357": 17, "4848954": 17, "456250190734863": 17, "483604431152344": 17, "919227600097656": 17, "07591158151626587": 17, "018529221415519714": 17, "4584773778915405": 17, "025607185438275337": 17, "3215447962284088": 17, "1725417822599411": 17, "3893167972564697": 17, "24189656972885132": 17, "226542353630066": 17, "2249596118927002": 17, "034958839416504": 17, "013846348039805889": 17, "047222375869751": 17, "2818778455257416": 17, "105": [17, 27], "274": [18, 27], "4499998092651367": 19, "799999952316284": 19, "f5254896c3294cbca33a33798809c1db": 19, "8405ded92a3940539213ca75ace4f64": 19, 
"92d6517f5fb64e68bc6614269bf01c28": 19, "092": [19, 27], "8940573361824338": 21, "30724412295249387": 21, "10910977984398254": 21, "106606957298055": 21, "19083366185515177": 21, "0146708954543895": 21, "9848905236525014": 21, "9470286680912169": 21, "15362206147624693": 21, "05455488992199127": 21, "9596576690673828": 21, "3379417061805725": 21, "884244441986084": 21, "8582920432090759": 21, "9091630578041077": 21, "5646940469741821": 21, "45458152890205383": 21, "12712430953979492": 21, "1475005149841309": 21, "06356215476989746": 21, "8940574": 21, "30724412": 21, "10910978": 21, "106607": 21, "19083367": 21, "014671": 21, "9848906": 21, "9470287": 21, "15362206": 21, "05455489": 21, "368": [21, 27], "010": [22, 27], "124": [23, 27], "51995794": 24, "24549283": 24, "23454923": 24, "519957959651947": 24, "2454928159713745": 24, "23454922437667847": 24, "669": [24, 27], "691": [25, 27], "285": 26, "4384377896785736": 26, "561562180519104": 26, "71903792": 26, "28096208": 26, "4939": 26, "979": [26, 27], "683": 27, "14449": 29, "54984": 29, "3338": 29, "55504": 29, "816086": 29, "14448115": 29, "54983764": 29, "33379987": 29, "55502653": 29, "81608297": 29, "225058631211141e": 29, "2777930441858178e": 29, "n1_treeensembleregressor_1": 29, "n2_linearregressor": 29, "079": [29, 58], "007118": 30, "001354": 30, "005786": 30, "012553": 30, "006686": 30, "001005": 30, "005542": 30, "010000": 30, "000669": 30, "037563": 30, "003221": 30, "033256": 30, "045436": 30, "000038": 30, "162250": 30, "008343": 30, "153190": 30, "177519": 30, "000016": [30, 49], "007177": 30, "000710": 30, "005820": 30, "010053": 30, "000026": 30, "000077": 30, "006645": 30, "000843": 30, "005670": 30, "008772": 30, "000664": 30, "034182": 30, "001519": 30, "032991": 30, "038628": 30, "000080": 30, "155520": 30, "003090": 30, "151416": 30, "160624": 30, "000003": [30, 49], "000014": 30, "146": [30, 58], "173": [31, 58], "529556274414062": 32, "06639961153268814": 32, "2243356704711914": 32, "1448548436164856": 32, "0675677061080933": 32, "4722660183906555": 32, "237240314483643": 32, "5116932392120361": 32, "8247517943382263": 32, "144781589508057": 32, "4886579513549805": 32, "7253645658493042": 32, "3656964302062988": 32, "8799896836280823": 32, "0190058946609497": 32, "49304017424583435": 32, "5850936770439148": 32, "2917989492416382": 32, "957119941711426": 32, "862874507904053": 32, "656042575836182": 32, "188352584838867": 32, "790811538696289": 32, "259439468383789": 32, "948318481445312": 32, "675846576690674": 32, "819464921951294": 32, "20181941986084": 32, "639735460281372": 32, "102": 32, "97": 32, "98": 32, "84586238861084": 32, "3479835987091064": 32, "3570774793624878": 32, "2792494297027588": 32, "8155440092086792": 32, "14481677114963531": 32, "6906545162200928": 32, "7135818004608154": 32, "9292002320289612": 32, "0187170505523682": 32, "3404710292816162": 32, "343726634979248": 32, "8512831926345825": 32, "5900801420211792": 32, "019533302634954453": 32, "626607060432434": 32, "19708473980426788": 32, "966790199279785": 32, "8903709650039673": 32, "3281314373016357": 32, "2592618465423584": 32, "113555669784546": 32, "04144952446222305": 32, "7720679640769958": 32, "6698925495147705": 32, "359083414077759": 32, "819303274154663": 32, "8522212505340576": 32, "8754918575286865": 32, "76548433303833": 32, "5094605684280396": 32, "876166343688965": 32, "066214084625244": 32, "31315803527832": 32, "141677379608154": 32, "081350326538086": 32, "860238075256348": 32, "618461608886719": 32, "555417537689209": 
32, "563416481018066": 32, "313610076904297": 32, "703146934509277": 32, "049132823944092": 32, "672673225402832": 32, "863733291625977": 32, "3013410568237305": 32, "4979329109191895": 32, "212557315826416": 32, "996405601501465": 32, "734021186828613": 32, "765551567077637": 32, "2054523527622223": 32, "720078945159912": 32, "2732791900634766": 32, "630293369293213": 32, "411093711853027": 32, "3866586685180664": 32, "608852386474609": 32, "734891891479492": 32, "10917384922504425": 32, "2374401092529297": 32, "827874481678009": 32, "071647882461548": 32, "102121114730835": 32, "0763232707977295": 32, "6638698577880859": 32, "8181525468826294": 32, "10006757825613022": 32, "082978248596191": 32, "585964202880859": 32, "586073875427246": 32, "862358570098877": 32, "337919235229492": 32, "677865743637085": 32, "8631792068481445": 32, "906221628189087": 32, "509670257568359": 32, "576967477798462": 32, "915074348449707": 32, "103733777999878": 32, "5658510327339172": 32, "2207704782485962": 32, "022815637290477753": 32, "3473073244094849": 32, "9728590846061707": 32, "4070947468280792": 32, "6099328994750977": 32, "345": [32, 58], "10110": 33, "20177945": 34, "55755314": 34, "50589005": 34, "95852584": 34, "41493749": 34, "2017796": 34, "5575526": 34, "5058892": 34, "9585238": 34, "4149387": 34, "958523": 34, "4149382": 34, "23519209400001273": 34, "24321569299991097": 34, "02909434586763382": 35, "9069037437438965": 35, "06400192528963089": 35, "001469946000725031": 35, "6960583329200745": 35, "30247175693511963": 35, "02909435": 35, "90690374": 35, "06400193": 35, "00146995": 35, "69605833": 35, "30247176": 35, "003929830000015499": 35, "0014273400999172737": 35, "002505180000025575": 35, "002399530100001357": 35, "5487159e": 35, "5127936e": 35, "7449998e": 35, "7273737e": 35, "7262522e": 35, "5831836e": 35, "6799171e": 35, "2008070e": 35, "2276080e": 35, "2772932e": 35, "8738047e": 35, "0638156e": 35, "8342626e": 35, "3462042e": 35, "5695420e": 35, "4909793e": 35, "9619307e": 35, "9595613e": 35, "4299725e": 35, "8941692e": 35, "3967523e": 35, "0x7ff2658ec880": 36, "191": 36, "7381674075899": 36, "336255213062701": 36, "884853452338575e": 36, "703614863001166e": 36, "xcf": 36, "x8b": 36, "xb9": 36, "ulp": 36, "x8fg": 36, "xe97": 36, "xeb": 36, "xac": 36, "ky": 36, "xf0": 36, "xcc": 36, "x91": 36, "xa7": 36, "x84": 36, "x19": 36, "x05": 36, "x92": 36, "xfe": 36, "x81": 36, "y4": 36, "7036148848472986e": 36, "838": [36, 58], "97220157": 37, "12241924": 37, "99787307": 37, "67093263": 37, "17484361": 37, "97003927": 37, "1789179": 37, "87439521": 37, "09527298": 37, "30139711": 37, "88570212": 37, "51109541": 37, "19951064": 37, "14534167": 37, "95532767": 37, "99164562": 37, "38817034": 37, "91632391": 37, "03786574": 37, "79062164": 37, "90506362": 37, "3258656": 37, "06107053": 37, "04773291": 37, "14433414": 37, "80131879": 37, "61165831": 37, "20416659": 37, "13289816": 37, "89627182": 37, "48651213": 37, "50815311": 37, "01625262": 37, "61501306": 37, "89697041": 37, "05706539": 37, "71098466": 37, "35219798": 37, "086616": 37, "50365186": 37, "64546316": 37, "81135074": 37, "93005961": 37, "28039115": 37, "49472411": 37, "58831498": 37, "56212928": 37, "55875855": 37, "97077425": 37, "96339957": 37, "86402575": 37, "5530338": 37, "77367018": 37, "82754129": 37, "87807792": 37, "06173653": 37, "86275216": 37, "95552455": 37, "07994174": 37, "87333352": 37, "99861021": 37, "72803208": 37, "55568868": 37, "73641792": 37, "49114086": 37, "12348735": 37, "43501224": 37, "23530312": 37, 
"95950739": 37, "71091534": 37, "00072575": 37, "86311402": 37, "78300091": 37, "0615144": 37, "23490423": 37, "40343436": 37, "40153286": 37, "99955514": 37, "89069251": 37, "95527858": 37, "9951345": 37, "12357878": 37, "62328438": 37, "88854512": 37, "40353892": 37, "14807862": 37, "68912032": 37, "86147193": 37, "8939904": 37, "34925913": 37, "99460105": 37, "64670109": 37, "12768297": 37, "53518427": 37, "21738173": 37, "99333175": 37, "95835329": 37, "34508059": 37, "95372786": 37, "90117552": 37, "93688791": 37, "92126695": 37, "97602613": 37, "43652719": 37, "70684331": 37, "70536445": 37, "68055347": 37, "9535028": 37, "0882743": 37, "93174089": 37, "79517817": 37, "36129564": 37, "99454069": 37, "60954394": 37, "2941058": 37, "4844702": 37, "11736796": 37, "13778849": 37, "41310146": 37, "76189431": 37, "71811095": 37, "95510958": 37, "44115032": 37, "78733906": 37, "72209616": 37, "25528011": 37, "26899484": 37, "68046933": 37, "03366801": 37, "7386478": 37, "96655433": 37, "48632293": 37, "66143984": 37, "80567208": 37, "1051166": 37, "57184189": 37, "32028627": 37, "19832221": 37, "44625761": 37, "66720431": 37, "17881254": 37, "35243001": 37, "61047248": 37, "54870182": 37, "63287534": 37, "80222733": 37, "52405623": 37, "59281379": 37, "27009395": 37, "92691417": 37, "00782039": 37, "24582165": 37, "07063185": 37, "28841255": 37, "49696775": 37, "44058261": 37, "32774484": 37, "60609916": 37, "42011876": 37, "75979573": 37, "67031866": 37, "88332283": 37, "74916442": 37, "97295414": 37, "00733004": 37, "39450078": 37, "40409915": 37, "55254609": 37, "87359817": 37, "90409172": 37, "38942931": 37, "72093827": 37, "75864765": 37, "50231003": 37, "18888648": 37, "06440319": 37, "85885082": 37, "82259848": 37, "39150476": 37, "58180763": 37, "24875062": 37, "47613928": 37, "23068597": 37, "86151719": 37, "0971052": 37, "38623038": 37, "33176642": 37, "51536817": 37, "03396275": 37, "29666246": 37, "81194891": 37, "82460449": 37, "78545021": 37, "57017344": 37, "37597729": 37, "74674716": 37, "04410524": 37, "27214703": 37, "8789993": 37, "3839694": 37, "07518249": 37, "3231268": 37, "50664789": 37, "76586458": 37, "53503117": 37, "16460668": 37, "75898803": 37, "19079906": 37, "53772877": 37, "74890266": 37, "21258624": 37, "79880246": 37, "73154786": 37, "20493915": 37, "92230348": 37, "26064879": 37, "39208869": 37, "51591765": 37, "50326811": 37, "24565892": 37, "4769421": 37, "73398281": 37, "01951679": 37, "11653632": 37, "28300998": 37, "8209098": 37, "90771307": 37, "94077286": 37, "49665868": 37, "39463942": 37, "69648261": 37, "91923245": 37, "44800197": 37, "41396325": 37, "11710726": 37, "87020286": 37, "8369882": 37, "97188789": 37, "35409464": 37, "17309015": 37, "54832687": 37, "29006262": 37, "61278191": 37, "90897245": 37, "47513288": 37, "79625003": 37, "14174615": 37, "86066075": 37, "53620233": 37, "1727675": 37, "67208548": 37, "35489518": 37, "06663455": 37, "81175026": 37, "93494533": 37, "03569718": 37, "83553248": 37, "39558948": 37, "74854302": 37, "05837295": 37, "07047627": 37, "30034327": 37, "18621697": 37, "90678521": 37, "87437682": 37, "48399598": 37, "0947599": 37, "81512611": 37, "58049956": 37, "81393069": 37, "27157117": 37, "87804494": 37, "08615557": 37, "32144511": 37, "50258278": 37, "4859412": 37, "0625801": 37, "98183333": 37, "24502791": 37, "9903383": 37, "59235157": 37, "0929821": 37, "52380862": 37, "08177428": 37, "56488438": 37, "26218919": 37, "87242105": 37, "43867573": 37, "53197842": 37, "84004429": 37, "50771114": 37, 
"03723854": 37, "43328422": 37, "46435048": 37, "09439379": 37, "186898": 37, "12558627": 37, "0145515": 37, "0029560327529907": 37, "131404995918274": 37, "877023220062256": 37, "727529525756836": 37, "982987403869629": 37, "997872829437256": 37, "9722016": 37, "1224194": 37, "9978728": 37, "6709325": 37, "255458": 37, "002956": 37, "131405": 37, "07903422": 37, "98537153": 37, "9990883": 37, "06881714": 37, "89339954": 37, "3452218": 37, "284393": 37, "14673": 37, "370349": 37, "877023": 37, "2804183": 37, "522927": 37, "746546": 37, "72753": 37, "130924": 37, "778385": 37, "749502": 37, "982987": 37, "13388": 37, "505": [37, 58], "9804100e": 38, "3561210e": 38, "0635902e": 38, "8617423e": 38, "3825771e": 38, "3083460e": 38, "040": [38, 58], "0x7ff2739429b0": 39, "20999998": 39, "34000003": 39, "65999997": 39, "27999997": 39, "71000004": 39, "28999996": 39, "78999996": 39, "21000002": 39, "34000033": 39, "65999967": 39, "2800004": 39, "7199996": 39, "5764": 40, "6655": 40, "09769": 40, "81168": 40, "33986": 40, "555651": 40, "44836": 40, "22372": 40, "38617": 40, "210175": 40, "8613": 40, "339687": 40, "87177": 40, "2728": 40, "68564": 40, "42897": 40, "91207": 40, "218216": 40, "60468": 40, "45862": 40, "90256": 40, "02576": 40, "727": [40, 58], "00081508": 41, "23934081": 41, "75984406": 41, "01008135": 41, "7703535": 41, "21956511": 41, "063": [41, 58], "8233591": 42, "7m": 42, "109m": 42, "6635820": 42, "8m": 42, "9m": 42, "5885989": 42, "0x7ff3015be440": 42, "09983726": 42, "22940648": 42, "67075626": 42, "09983726590871811": 42, "22940650582313538": 42, "6707562804222107": 42, "27391998": 43, "27510369": 43, "45097633": 43, "2739199697971344": 43, "27510371804237366": 43, "4509763717651367": 43, "542": [43, 58], "0002170055282193992": 44, "3252795611109084e": 44, "01714456033134": 44, "7663594098493718e": 44, "081887920419746e": 44, "731420010915899": 44, "637800576000245": 44, "7089149429998542": 44, "396380e": 44, "127739e": 44, "453829e": 44, "785900e": 44, "081888e": 44, "457641e": 44, "130784e": 44, "680156e": 44, "390029e": 44, "001937e": 44, "867": [44, 58], "1758379": 45, "43438542": 45, "3897767": 45, "175837904214859": 45, "43438541889190674": 45, "38977670669555664": 45, "99487": 45, "04601": 45, "94355": 45, "82603": 45, "028": [47, 58], "000001": 49, "000015": 49, "00001": 49, "000041": 49, "047": [51, 58], "229": [52, 58], "030": [54, 58], "56it": 55, "278442": 55, "43it": 55, "000006": 55, "84it": 55, "20it": 55, "336951": 55, "50it": 55, "67it": 55, "057222": 55, "807": [55, 58], "361": [57, 58], "854": 58}, "objects": {"skl2onnx._parse": [[0, 0, 1, "", "parse_sklearn"], [0, 0, 1, "", "parse_sklearn_model"]], "skl2onnx.algebra.onnx_ops": [[64, 1, 1, "", "OnnxAbs"], [64, 1, 1, "", "OnnxAbs_1"], [64, 1, 1, "", "OnnxAbs_13"], [64, 1, 1, "", "OnnxAbs_6"], [64, 1, 1, "", "OnnxAcos"], [64, 1, 1, "", "OnnxAcos_7"], [64, 1, 1, "", "OnnxAcosh"], [64, 1, 1, "", "OnnxAcosh_9"], [64, 1, 1, "", "OnnxAdagrad"], [64, 1, 1, "", "OnnxAdagrad_1"], [64, 1, 1, "", "OnnxAdam"], [64, 1, 1, "", "OnnxAdam_1"], [64, 1, 1, "", "OnnxAdd"], [64, 1, 1, "", "OnnxAdd_1"], [64, 1, 1, "", "OnnxAdd_13"], [64, 1, 1, "", "OnnxAdd_14"], [64, 1, 1, "", "OnnxAdd_6"], [64, 1, 1, "", "OnnxAdd_7"], [64, 1, 1, "", "OnnxAnd"], [64, 1, 1, "", "OnnxAnd_1"], [64, 1, 1, "", "OnnxAnd_7"], [64, 1, 1, "", "OnnxArgMax"], [64, 1, 1, "", "OnnxArgMax_1"], [64, 1, 1, "", "OnnxArgMax_11"], [64, 1, 1, "", "OnnxArgMax_12"], [64, 1, 1, "", "OnnxArgMax_13"], [64, 1, 1, "", "OnnxArgMin"], [64, 1, 1, "", "OnnxArgMin_1"], [64, 
1, 1, "", "OnnxArgMin_11"], [64, 1, 1, "", "OnnxArgMin_12"], [64, 1, 1, "", "OnnxArgMin_13"], [64, 1, 1, "", "OnnxArrayFeatureExtractor"], [64, 1, 1, "", "OnnxArrayFeatureExtractor_1"], [64, 1, 1, "", "OnnxAsin"], [64, 1, 1, "", "OnnxAsin_7"], [64, 1, 1, "", "OnnxAsinh"], [64, 1, 1, "", "OnnxAsinh_9"], [64, 1, 1, "", "OnnxAtan"], [64, 1, 1, "", "OnnxAtan_7"], [64, 1, 1, "", "OnnxAtanh"], [64, 1, 1, "", "OnnxAtanh_9"], [64, 1, 1, "", "OnnxAveragePool"], [64, 1, 1, "", "OnnxAveragePool_1"], [64, 1, 1, "", "OnnxAveragePool_10"], [64, 1, 1, "", "OnnxAveragePool_11"], [64, 1, 1, "", "OnnxAveragePool_19"], [64, 1, 1, "", "OnnxAveragePool_7"], [64, 1, 1, "", "OnnxBatchNormalization"], [64, 1, 1, "", "OnnxBatchNormalization_1"], [64, 1, 1, "", "OnnxBatchNormalization_14"], [64, 1, 1, "", "OnnxBatchNormalization_15"], [64, 1, 1, "", "OnnxBatchNormalization_6"], [64, 1, 1, "", "OnnxBatchNormalization_7"], [64, 1, 1, "", "OnnxBatchNormalization_9"], [64, 1, 1, "", "OnnxBernoulli"], [64, 1, 1, "", "OnnxBernoulli_15"], [64, 1, 1, "", "OnnxBinarizer"], [64, 1, 1, "", "OnnxBinarizer_1"], [64, 1, 1, "", "OnnxBitShift"], [64, 1, 1, "", "OnnxBitShift_11"], [64, 1, 1, "", "OnnxBitwiseAnd"], [64, 1, 1, "", "OnnxBitwiseAnd_18"], [64, 1, 1, "", "OnnxBitwiseNot"], [64, 1, 1, "", "OnnxBitwiseNot_18"], [64, 1, 1, "", "OnnxBitwiseOr"], [64, 1, 1, "", "OnnxBitwiseOr_18"], [64, 1, 1, "", "OnnxBitwiseXor"], [64, 1, 1, "", "OnnxBitwiseXor_18"], [64, 1, 1, "", "OnnxBlackmanWindow"], [64, 1, 1, "", "OnnxBlackmanWindow_17"], [64, 1, 1, "", "OnnxCast"], [64, 1, 1, "", "OnnxCastLike"], [64, 1, 1, "", "OnnxCastLike_15"], [64, 1, 1, "", "OnnxCastMap"], [64, 1, 1, "", "OnnxCastMap_1"], [64, 1, 1, "", "OnnxCast_1"], [64, 1, 1, "", "OnnxCast_13"], [64, 1, 1, "", "OnnxCast_6"], [64, 1, 1, "", "OnnxCast_9"], [64, 1, 1, "", "OnnxCategoryMapper"], [64, 1, 1, "", "OnnxCategoryMapper_1"], [64, 1, 1, "", "OnnxCeil"], [64, 1, 1, "", "OnnxCeil_1"], [64, 1, 1, "", "OnnxCeil_13"], [64, 1, 1, "", "OnnxCeil_6"], [64, 1, 1, "", "OnnxCelu"], [64, 1, 1, "", "OnnxCelu_12"], [64, 1, 1, "", "OnnxCenterCropPad"], [64, 1, 1, "", "OnnxCenterCropPad_18"], [64, 1, 1, "", "OnnxClip"], [64, 1, 1, "", "OnnxClip_1"], [64, 1, 1, "", "OnnxClip_11"], [64, 1, 1, "", "OnnxClip_12"], [64, 1, 1, "", "OnnxClip_13"], [64, 1, 1, "", "OnnxClip_6"], [64, 1, 1, "", "OnnxCol2Im"], [64, 1, 1, "", "OnnxCol2Im_18"], [64, 1, 1, "", "OnnxCompress"], [64, 1, 1, "", "OnnxCompress_11"], [64, 1, 1, "", "OnnxCompress_9"], [64, 1, 1, "", "OnnxConcat"], [64, 1, 1, "", "OnnxConcatFromSequence"], [64, 1, 1, "", "OnnxConcatFromSequence_11"], [64, 1, 1, "", "OnnxConcat_1"], [64, 1, 1, "", "OnnxConcat_11"], [64, 1, 1, "", "OnnxConcat_13"], [64, 1, 1, "", "OnnxConcat_4"], [64, 1, 1, "", "OnnxConstant"], [64, 1, 1, "", "OnnxConstantOfShape"], [64, 1, 1, "", "OnnxConstantOfShape_9"], [64, 1, 1, "", "OnnxConstant_1"], [64, 1, 1, "", "OnnxConstant_11"], [64, 1, 1, "", "OnnxConstant_12"], [64, 1, 1, "", "OnnxConstant_13"], [64, 1, 1, "", "OnnxConstant_9"], [64, 1, 1, "", "OnnxConv"], [64, 1, 1, "", "OnnxConvInteger"], [64, 1, 1, "", "OnnxConvInteger_10"], [64, 1, 1, "", "OnnxConvTranspose"], [64, 1, 1, "", "OnnxConvTranspose_1"], [64, 1, 1, "", "OnnxConvTranspose_11"], [64, 1, 1, "", "OnnxConv_1"], [64, 1, 1, "", "OnnxConv_11"], [64, 1, 1, "", "OnnxCos"], [64, 1, 1, "", "OnnxCos_7"], [64, 1, 1, "", "OnnxCosh"], [64, 1, 1, "", "OnnxCosh_9"], [64, 1, 1, "", "OnnxCumSum"], [64, 1, 1, "", "OnnxCumSum_11"], [64, 1, 1, "", "OnnxCumSum_14"], [64, 1, 1, "", "OnnxDFT"], [64, 1, 1, "", "OnnxDFT_17"], 
[64, 1, 1, "", "OnnxDepthToSpace"], [64, 1, 1, "", "OnnxDepthToSpace_1"], [64, 1, 1, "", "OnnxDepthToSpace_11"], [64, 1, 1, "", "OnnxDepthToSpace_13"], [64, 1, 1, "", "OnnxDequantizeLinear"], [64, 1, 1, "", "OnnxDequantizeLinear_10"], [64, 1, 1, "", "OnnxDequantizeLinear_13"], [64, 1, 1, "", "OnnxDet"], [64, 1, 1, "", "OnnxDet_11"], [64, 1, 1, "", "OnnxDictVectorizer"], [64, 1, 1, "", "OnnxDictVectorizer_1"], [64, 1, 1, "", "OnnxDiv"], [64, 1, 1, "", "OnnxDiv_1"], [64, 1, 1, "", "OnnxDiv_13"], [64, 1, 1, "", "OnnxDiv_14"], [64, 1, 1, "", "OnnxDiv_6"], [64, 1, 1, "", "OnnxDiv_7"], [64, 1, 1, "", "OnnxDropout"], [64, 1, 1, "", "OnnxDropout_1"], [64, 1, 1, "", "OnnxDropout_10"], [64, 1, 1, "", "OnnxDropout_12"], [64, 1, 1, "", "OnnxDropout_13"], [64, 1, 1, "", "OnnxDropout_6"], [64, 1, 1, "", "OnnxDropout_7"], [64, 1, 1, "", "OnnxDynamicQuantizeLinear"], [64, 1, 1, "", "OnnxDynamicQuantizeLinear_11"], [64, 1, 1, "", "OnnxEinsum"], [64, 1, 1, "", "OnnxEinsum_12"], [64, 1, 1, "", "OnnxElu"], [64, 1, 1, "", "OnnxElu_1"], [64, 1, 1, "", "OnnxElu_6"], [64, 1, 1, "", "OnnxEqual"], [64, 1, 1, "", "OnnxEqual_1"], [64, 1, 1, "", "OnnxEqual_11"], [64, 1, 1, "", "OnnxEqual_13"], [64, 1, 1, "", "OnnxEqual_19"], [64, 1, 1, "", "OnnxEqual_7"], [64, 1, 1, "", "OnnxErf"], [64, 1, 1, "", "OnnxErf_13"], [64, 1, 1, "", "OnnxErf_9"], [64, 1, 1, "", "OnnxExp"], [64, 1, 1, "", "OnnxExp_1"], [64, 1, 1, "", "OnnxExp_13"], [64, 1, 1, "", "OnnxExp_6"], [64, 1, 1, "", "OnnxExpand"], [64, 1, 1, "", "OnnxExpand_13"], [64, 1, 1, "", "OnnxExpand_8"], [64, 1, 1, "", "OnnxEyeLike"], [64, 1, 1, "", "OnnxEyeLike_9"], [64, 1, 1, "", "OnnxFeatureVectorizer"], [64, 1, 1, "", "OnnxFeatureVectorizer_1"], [64, 1, 1, "", "OnnxFlatten"], [64, 1, 1, "", "OnnxFlatten_1"], [64, 1, 1, "", "OnnxFlatten_11"], [64, 1, 1, "", "OnnxFlatten_13"], [64, 1, 1, "", "OnnxFlatten_9"], [64, 1, 1, "", "OnnxFloor"], [64, 1, 1, "", "OnnxFloor_1"], [64, 1, 1, "", "OnnxFloor_13"], [64, 1, 1, "", "OnnxFloor_6"], [64, 1, 1, "", "OnnxGRU"], [64, 1, 1, "", "OnnxGRU_1"], [64, 1, 1, "", "OnnxGRU_14"], [64, 1, 1, "", "OnnxGRU_3"], [64, 1, 1, "", "OnnxGRU_7"], [64, 1, 1, "", "OnnxGather"], [64, 1, 1, "", "OnnxGatherElements"], [64, 1, 1, "", "OnnxGatherElements_11"], [64, 1, 1, "", "OnnxGatherElements_13"], [64, 1, 1, "", "OnnxGatherND"], [64, 1, 1, "", "OnnxGatherND_11"], [64, 1, 1, "", "OnnxGatherND_12"], [64, 1, 1, "", "OnnxGatherND_13"], [64, 1, 1, "", "OnnxGather_1"], [64, 1, 1, "", "OnnxGather_11"], [64, 1, 1, "", "OnnxGather_13"], [64, 1, 1, "", "OnnxGemm"], [64, 1, 1, "", "OnnxGemm_1"], [64, 1, 1, "", "OnnxGemm_11"], [64, 1, 1, "", "OnnxGemm_13"], [64, 1, 1, "", "OnnxGemm_6"], [64, 1, 1, "", "OnnxGemm_7"], [64, 1, 1, "", "OnnxGemm_9"], [64, 1, 1, "", "OnnxGlobalAveragePool"], [64, 1, 1, "", "OnnxGlobalAveragePool_1"], [64, 1, 1, "", "OnnxGlobalLpPool"], [64, 1, 1, "", "OnnxGlobalLpPool_1"], [64, 1, 1, "", "OnnxGlobalLpPool_2"], [64, 1, 1, "", "OnnxGlobalMaxPool"], [64, 1, 1, "", "OnnxGlobalMaxPool_1"], [64, 1, 1, "", "OnnxGradient"], [64, 1, 1, "", "OnnxGradient_1"], [64, 1, 1, "", "OnnxGreater"], [64, 1, 1, "", "OnnxGreaterOrEqual"], [64, 1, 1, "", "OnnxGreaterOrEqual_12"], [64, 1, 1, "", "OnnxGreaterOrEqual_16"], [64, 1, 1, "", "OnnxGreater_1"], [64, 1, 1, "", "OnnxGreater_13"], [64, 1, 1, "", "OnnxGreater_7"], [64, 1, 1, "", "OnnxGreater_9"], [64, 1, 1, "", "OnnxGridSample"], [64, 1, 1, "", "OnnxGridSample_16"], [64, 1, 1, "", "OnnxGroupNormalization"], [64, 1, 1, "", "OnnxGroupNormalization_18"], [64, 1, 1, "", "OnnxHammingWindow"], [64, 1, 1, "", 
"OnnxHammingWindow_17"], [64, 1, 1, "", "OnnxHannWindow"], [64, 1, 1, "", "OnnxHannWindow_17"], [64, 1, 1, "", "OnnxHardSigmoid"], [64, 1, 1, "", "OnnxHardSigmoid_1"], [64, 1, 1, "", "OnnxHardSigmoid_6"], [64, 1, 1, "", "OnnxHardSwish"], [64, 1, 1, "", "OnnxHardSwish_14"], [64, 1, 1, "", "OnnxHardmax"], [64, 1, 1, "", "OnnxHardmax_1"], [64, 1, 1, "", "OnnxHardmax_11"], [64, 1, 1, "", "OnnxHardmax_13"], [64, 1, 1, "", "OnnxIdentity"], [64, 1, 1, "", "OnnxIdentity_1"], [64, 1, 1, "", "OnnxIdentity_13"], [64, 1, 1, "", "OnnxIdentity_14"], [64, 1, 1, "", "OnnxIdentity_16"], [64, 1, 1, "", "OnnxIf"], [64, 1, 1, "", "OnnxIf_1"], [64, 1, 1, "", "OnnxIf_11"], [64, 1, 1, "", "OnnxIf_13"], [64, 1, 1, "", "OnnxIf_16"], [64, 1, 1, "", "OnnxImputer"], [64, 1, 1, "", "OnnxImputer_1"], [64, 1, 1, "", "OnnxInstanceNormalization"], [64, 1, 1, "", "OnnxInstanceNormalization_1"], [64, 1, 1, "", "OnnxInstanceNormalization_6"], [64, 1, 1, "", "OnnxIsInf"], [64, 1, 1, "", "OnnxIsInf_10"], [64, 1, 1, "", "OnnxIsNaN"], [64, 1, 1, "", "OnnxIsNaN_13"], [64, 1, 1, "", "OnnxIsNaN_9"], [64, 1, 1, "", "OnnxLRN"], [64, 1, 1, "", "OnnxLRN_1"], [64, 1, 1, "", "OnnxLRN_13"], [64, 1, 1, "", "OnnxLSTM"], [64, 1, 1, "", "OnnxLSTM_1"], [64, 1, 1, "", "OnnxLSTM_14"], [64, 1, 1, "", "OnnxLSTM_7"], [64, 1, 1, "", "OnnxLabelEncoder"], [64, 1, 1, "", "OnnxLabelEncoder_1"], [64, 1, 1, "", "OnnxLabelEncoder_2"], [64, 1, 1, "", "OnnxLayerNormalization"], [64, 1, 1, "", "OnnxLayerNormalization_17"], [64, 1, 1, "", "OnnxLeakyRelu"], [64, 1, 1, "", "OnnxLeakyRelu_1"], [64, 1, 1, "", "OnnxLeakyRelu_16"], [64, 1, 1, "", "OnnxLeakyRelu_6"], [64, 1, 1, "", "OnnxLess"], [64, 1, 1, "", "OnnxLessOrEqual"], [64, 1, 1, "", "OnnxLessOrEqual_12"], [64, 1, 1, "", "OnnxLessOrEqual_16"], [64, 1, 1, "", "OnnxLess_1"], [64, 1, 1, "", "OnnxLess_13"], [64, 1, 1, "", "OnnxLess_7"], [64, 1, 1, "", "OnnxLess_9"], [64, 1, 1, "", "OnnxLinearClassifier"], [64, 1, 1, "", "OnnxLinearClassifier_1"], [64, 1, 1, "", "OnnxLinearRegressor"], [64, 1, 1, "", "OnnxLinearRegressor_1"], [64, 1, 1, "", "OnnxLog"], [64, 1, 1, "", "OnnxLogSoftmax"], [64, 1, 1, "", "OnnxLogSoftmax_1"], [64, 1, 1, "", "OnnxLogSoftmax_11"], [64, 1, 1, "", "OnnxLogSoftmax_13"], [64, 1, 1, "", "OnnxLog_1"], [64, 1, 1, "", "OnnxLog_13"], [64, 1, 1, "", "OnnxLog_6"], [64, 1, 1, "", "OnnxLoop"], [64, 1, 1, "", "OnnxLoop_1"], [64, 1, 1, "", "OnnxLoop_11"], [64, 1, 1, "", "OnnxLoop_13"], [64, 1, 1, "", "OnnxLoop_16"], [64, 1, 1, "", "OnnxLpNormalization"], [64, 1, 1, "", "OnnxLpNormalization_1"], [64, 1, 1, "", "OnnxLpPool"], [64, 1, 1, "", "OnnxLpPool_1"], [64, 1, 1, "", "OnnxLpPool_11"], [64, 1, 1, "", "OnnxLpPool_18"], [64, 1, 1, "", "OnnxLpPool_2"], [64, 1, 1, "", "OnnxMatMul"], [64, 1, 1, "", "OnnxMatMulInteger"], [64, 1, 1, "", "OnnxMatMulInteger_10"], [64, 1, 1, "", "OnnxMatMul_1"], [64, 1, 1, "", "OnnxMatMul_13"], [64, 1, 1, "", "OnnxMatMul_9"], [64, 1, 1, "", "OnnxMax"], [64, 1, 1, "", "OnnxMaxPool"], [64, 1, 1, "", "OnnxMaxPool_1"], [64, 1, 1, "", "OnnxMaxPool_10"], [64, 1, 1, "", "OnnxMaxPool_11"], [64, 1, 1, "", "OnnxMaxPool_12"], [64, 1, 1, "", "OnnxMaxPool_8"], [64, 1, 1, "", "OnnxMaxRoiPool"], [64, 1, 1, "", "OnnxMaxRoiPool_1"], [64, 1, 1, "", "OnnxMaxUnpool"], [64, 1, 1, "", "OnnxMaxUnpool_11"], [64, 1, 1, "", "OnnxMaxUnpool_9"], [64, 1, 1, "", "OnnxMax_1"], [64, 1, 1, "", "OnnxMax_12"], [64, 1, 1, "", "OnnxMax_13"], [64, 1, 1, "", "OnnxMax_6"], [64, 1, 1, "", "OnnxMax_8"], [64, 1, 1, "", "OnnxMean"], [64, 1, 1, "", "OnnxMeanVarianceNormalization"], [64, 1, 1, "", 
"OnnxMeanVarianceNormalization_13"], [64, 1, 1, "", "OnnxMeanVarianceNormalization_9"], [64, 1, 1, "", "OnnxMean_1"], [64, 1, 1, "", "OnnxMean_13"], [64, 1, 1, "", "OnnxMean_6"], [64, 1, 1, "", "OnnxMean_8"], [64, 1, 1, "", "OnnxMelWeightMatrix"], [64, 1, 1, "", "OnnxMelWeightMatrix_17"], [64, 1, 1, "", "OnnxMin"], [64, 1, 1, "", "OnnxMin_1"], [64, 1, 1, "", "OnnxMin_12"], [64, 1, 1, "", "OnnxMin_13"], [64, 1, 1, "", "OnnxMin_6"], [64, 1, 1, "", "OnnxMin_8"], [64, 1, 1, "", "OnnxMish"], [64, 1, 1, "", "OnnxMish_18"], [64, 1, 1, "", "OnnxMod"], [64, 1, 1, "", "OnnxMod_10"], [64, 1, 1, "", "OnnxMod_13"], [64, 1, 1, "", "OnnxMomentum"], [64, 1, 1, "", "OnnxMomentum_1"], [64, 1, 1, "", "OnnxMul"], [64, 1, 1, "", "OnnxMul_1"], [64, 1, 1, "", "OnnxMul_13"], [64, 1, 1, "", "OnnxMul_14"], [64, 1, 1, "", "OnnxMul_6"], [64, 1, 1, "", "OnnxMul_7"], [64, 1, 1, "", "OnnxMultinomial"], [64, 1, 1, "", "OnnxMultinomial_7"], [64, 1, 1, "", "OnnxNeg"], [64, 1, 1, "", "OnnxNeg_1"], [64, 1, 1, "", "OnnxNeg_13"], [64, 1, 1, "", "OnnxNeg_6"], [64, 1, 1, "", "OnnxNegativeLogLikelihoodLoss"], [64, 1, 1, "", "OnnxNegativeLogLikelihoodLoss_12"], [64, 1, 1, "", "OnnxNegativeLogLikelihoodLoss_13"], [64, 1, 1, "", "OnnxNonMaxSuppression"], [64, 1, 1, "", "OnnxNonMaxSuppression_10"], [64, 1, 1, "", "OnnxNonMaxSuppression_11"], [64, 1, 1, "", "OnnxNonZero"], [64, 1, 1, "", "OnnxNonZero_13"], [64, 1, 1, "", "OnnxNonZero_9"], [64, 1, 1, "", "OnnxNormalizer"], [64, 1, 1, "", "OnnxNormalizer_1"], [64, 1, 1, "", "OnnxNot"], [64, 1, 1, "", "OnnxNot_1"], [64, 1, 1, "", "OnnxOneHot"], [64, 1, 1, "", "OnnxOneHotEncoder"], [64, 1, 1, "", "OnnxOneHotEncoder_1"], [64, 1, 1, "", "OnnxOneHot_11"], [64, 1, 1, "", "OnnxOneHot_9"], [64, 1, 1, "", "OnnxOptional"], [64, 1, 1, "", "OnnxOptionalGetElement"], [64, 1, 1, "", "OnnxOptionalGetElement_15"], [64, 1, 1, "", "OnnxOptionalGetElement_18"], [64, 1, 1, "", "OnnxOptionalHasElement"], [64, 1, 1, "", "OnnxOptionalHasElement_15"], [64, 1, 1, "", "OnnxOptionalHasElement_18"], [64, 1, 1, "", "OnnxOptional_15"], [64, 1, 1, "", "OnnxOr"], [64, 1, 1, "", "OnnxOr_1"], [64, 1, 1, "", "OnnxOr_7"], [64, 1, 1, "", "OnnxPRelu"], [64, 1, 1, "", "OnnxPRelu_1"], [64, 1, 1, "", "OnnxPRelu_16"], [64, 1, 1, "", "OnnxPRelu_6"], [64, 1, 1, "", "OnnxPRelu_7"], [64, 1, 1, "", "OnnxPRelu_9"], [64, 1, 1, "", "OnnxPad"], [64, 1, 1, "", "OnnxPad_1"], [64, 1, 1, "", "OnnxPad_11"], [64, 1, 1, "", "OnnxPad_13"], [64, 1, 1, "", "OnnxPad_18"], [64, 1, 1, "", "OnnxPad_19"], [64, 1, 1, "", "OnnxPad_2"], [64, 1, 1, "", "OnnxPow"], [64, 1, 1, "", "OnnxPow_1"], [64, 1, 1, "", "OnnxPow_12"], [64, 1, 1, "", "OnnxPow_13"], [64, 1, 1, "", "OnnxPow_15"], [64, 1, 1, "", "OnnxPow_7"], [64, 1, 1, "", "OnnxQLinearConv"], [64, 1, 1, "", "OnnxQLinearConv_10"], [64, 1, 1, "", "OnnxQLinearMatMul"], [64, 1, 1, "", "OnnxQLinearMatMul_10"], [64, 1, 1, "", "OnnxQuantizeLinear"], [64, 1, 1, "", "OnnxQuantizeLinear_10"], [64, 1, 1, "", "OnnxQuantizeLinear_13"], [64, 1, 1, "", "OnnxRNN"], [64, 1, 1, "", "OnnxRNN_1"], [64, 1, 1, "", "OnnxRNN_14"], [64, 1, 1, "", "OnnxRNN_7"], [64, 1, 1, "", "OnnxRandomNormal"], [64, 1, 1, "", "OnnxRandomNormalLike"], [64, 1, 1, "", "OnnxRandomNormalLike_1"], [64, 1, 1, "", "OnnxRandomNormal_1"], [64, 1, 1, "", "OnnxRandomUniform"], [64, 1, 1, "", "OnnxRandomUniformLike"], [64, 1, 1, "", "OnnxRandomUniformLike_1"], [64, 1, 1, "", "OnnxRandomUniform_1"], [64, 1, 1, "", "OnnxRange"], [64, 1, 1, "", "OnnxRange_11"], [64, 1, 1, "", "OnnxReciprocal"], [64, 1, 1, "", "OnnxReciprocal_1"], [64, 1, 1, "", 
"OnnxReciprocal_13"], [64, 1, 1, "", "OnnxReciprocal_6"], [64, 1, 1, "", "OnnxReduceL1"], [64, 1, 1, "", "OnnxReduceL1_1"], [64, 1, 1, "", "OnnxReduceL1_11"], [64, 1, 1, "", "OnnxReduceL1_13"], [64, 1, 1, "", "OnnxReduceL1_18"], [64, 1, 1, "", "OnnxReduceL2"], [64, 1, 1, "", "OnnxReduceL2_1"], [64, 1, 1, "", "OnnxReduceL2_11"], [64, 1, 1, "", "OnnxReduceL2_13"], [64, 1, 1, "", "OnnxReduceL2_18"], [64, 1, 1, "", "OnnxReduceLogSum"], [64, 1, 1, "", "OnnxReduceLogSumExp"], [64, 1, 1, "", "OnnxReduceLogSumExp_1"], [64, 1, 1, "", "OnnxReduceLogSumExp_11"], [64, 1, 1, "", "OnnxReduceLogSumExp_13"], [64, 1, 1, "", "OnnxReduceLogSumExp_18"], [64, 1, 1, "", "OnnxReduceLogSum_1"], [64, 1, 1, "", "OnnxReduceLogSum_11"], [64, 1, 1, "", "OnnxReduceLogSum_13"], [64, 1, 1, "", "OnnxReduceLogSum_18"], [64, 1, 1, "", "OnnxReduceMax"], [64, 1, 1, "", "OnnxReduceMax_1"], [64, 1, 1, "", "OnnxReduceMax_11"], [64, 1, 1, "", "OnnxReduceMax_12"], [64, 1, 1, "", "OnnxReduceMax_13"], [64, 1, 1, "", "OnnxReduceMax_18"], [64, 1, 1, "", "OnnxReduceMean"], [64, 1, 1, "", "OnnxReduceMean_1"], [64, 1, 1, "", "OnnxReduceMean_11"], [64, 1, 1, "", "OnnxReduceMean_13"], [64, 1, 1, "", "OnnxReduceMean_18"], [64, 1, 1, "", "OnnxReduceMin"], [64, 1, 1, "", "OnnxReduceMin_1"], [64, 1, 1, "", "OnnxReduceMin_11"], [64, 1, 1, "", "OnnxReduceMin_12"], [64, 1, 1, "", "OnnxReduceMin_13"], [64, 1, 1, "", "OnnxReduceMin_18"], [64, 1, 1, "", "OnnxReduceProd"], [64, 1, 1, "", "OnnxReduceProd_1"], [64, 1, 1, "", "OnnxReduceProd_11"], [64, 1, 1, "", "OnnxReduceProd_13"], [64, 1, 1, "", "OnnxReduceProd_18"], [64, 1, 1, "", "OnnxReduceSum"], [64, 1, 1, "", "OnnxReduceSumSquare"], [64, 1, 1, "", "OnnxReduceSumSquare_1"], [64, 1, 1, "", "OnnxReduceSumSquare_11"], [64, 1, 1, "", "OnnxReduceSumSquare_13"], [64, 1, 1, "", "OnnxReduceSumSquare_18"], [64, 1, 1, "", "OnnxReduceSum_1"], [64, 1, 1, "", "OnnxReduceSum_11"], [64, 1, 1, "", "OnnxReduceSum_13"], [64, 1, 1, "", "OnnxRelu"], [64, 1, 1, "", "OnnxRelu_1"], [64, 1, 1, "", "OnnxRelu_13"], [64, 1, 1, "", "OnnxRelu_14"], [64, 1, 1, "", "OnnxRelu_6"], [64, 1, 1, "", "OnnxReshape"], [64, 1, 1, "", "OnnxReshape_1"], [64, 1, 1, "", "OnnxReshape_13"], [64, 1, 1, "", "OnnxReshape_14"], [64, 1, 1, "", "OnnxReshape_5"], [64, 1, 1, "", "OnnxResize"], [64, 1, 1, "", "OnnxResize_10"], [64, 1, 1, "", "OnnxResize_11"], [64, 1, 1, "", "OnnxResize_13"], [64, 1, 1, "", "OnnxResize_18"], [64, 1, 1, "", "OnnxResize_19"], [64, 1, 1, "", "OnnxReverseSequence"], [64, 1, 1, "", "OnnxReverseSequence_10"], [64, 1, 1, "", "OnnxRoiAlign"], [64, 1, 1, "", "OnnxRoiAlign_10"], [64, 1, 1, "", "OnnxRoiAlign_16"], [64, 1, 1, "", "OnnxRound"], [64, 1, 1, "", "OnnxRound_11"], [64, 1, 1, "", "OnnxSTFT"], [64, 1, 1, "", "OnnxSTFT_17"], [64, 1, 1, "", "OnnxSVMClassifier"], [64, 1, 1, "", "OnnxSVMClassifier_1"], [64, 1, 1, "", "OnnxSVMRegressor"], [64, 1, 1, "", "OnnxSVMRegressor_1"], [64, 1, 1, "", "OnnxScaler"], [64, 1, 1, "", "OnnxScaler_1"], [64, 1, 1, "", "OnnxScan"], [64, 1, 1, "", "OnnxScan_11"], [64, 1, 1, "", "OnnxScan_16"], [64, 1, 1, "", "OnnxScan_8"], [64, 1, 1, "", "OnnxScan_9"], [64, 1, 1, "", "OnnxScatter"], [64, 1, 1, "", "OnnxScatterElements"], [64, 1, 1, "", "OnnxScatterElements_11"], [64, 1, 1, "", "OnnxScatterElements_13"], [64, 1, 1, "", "OnnxScatterElements_16"], [64, 1, 1, "", "OnnxScatterElements_18"], [64, 1, 1, "", "OnnxScatterND"], [64, 1, 1, "", "OnnxScatterND_11"], [64, 1, 1, "", "OnnxScatterND_13"], [64, 1, 1, "", "OnnxScatterND_16"], [64, 1, 1, "", "OnnxScatterND_18"], [64, 1, 1, "", "OnnxScatter_11"], 
[64, 1, 1, "", "OnnxScatter_9"], [64, 1, 1, "", "OnnxSelu"], [64, 1, 1, "", "OnnxSelu_1"], [64, 1, 1, "", "OnnxSelu_6"], [64, 1, 1, "", "OnnxSequenceAt"], [64, 1, 1, "", "OnnxSequenceAt_11"], [64, 1, 1, "", "OnnxSequenceConstruct"], [64, 1, 1, "", "OnnxSequenceConstruct_11"], [64, 1, 1, "", "OnnxSequenceEmpty"], [64, 1, 1, "", "OnnxSequenceEmpty_11"], [64, 1, 1, "", "OnnxSequenceErase"], [64, 1, 1, "", "OnnxSequenceErase_11"], [64, 1, 1, "", "OnnxSequenceInsert"], [64, 1, 1, "", "OnnxSequenceInsert_11"], [64, 1, 1, "", "OnnxSequenceLength"], [64, 1, 1, "", "OnnxSequenceLength_11"], [64, 1, 1, "", "OnnxSequenceMap"], [64, 1, 1, "", "OnnxSequenceMap_17"], [64, 1, 1, "", "OnnxShape"], [64, 1, 1, "", "OnnxShape_1"], [64, 1, 1, "", "OnnxShape_13"], [64, 1, 1, "", "OnnxShape_15"], [64, 1, 1, "", "OnnxShrink"], [64, 1, 1, "", "OnnxShrink_9"], [64, 1, 1, "", "OnnxSigmoid"], [64, 1, 1, "", "OnnxSigmoid_1"], [64, 1, 1, "", "OnnxSigmoid_13"], [64, 1, 1, "", "OnnxSigmoid_6"], [64, 1, 1, "", "OnnxSign"], [64, 1, 1, "", "OnnxSign_13"], [64, 1, 1, "", "OnnxSign_9"], [64, 1, 1, "", "OnnxSin"], [64, 1, 1, "", "OnnxSin_7"], [64, 1, 1, "", "OnnxSinh"], [64, 1, 1, "", "OnnxSinh_9"], [64, 1, 1, "", "OnnxSize"], [64, 1, 1, "", "OnnxSize_1"], [64, 1, 1, "", "OnnxSize_13"], [64, 1, 1, "", "OnnxSlice"], [64, 1, 1, "", "OnnxSlice_1"], [64, 1, 1, "", "OnnxSlice_10"], [64, 1, 1, "", "OnnxSlice_11"], [64, 1, 1, "", "OnnxSlice_13"], [64, 1, 1, "", "OnnxSoftmax"], [64, 1, 1, "", "OnnxSoftmaxCrossEntropyLoss"], [64, 1, 1, "", "OnnxSoftmaxCrossEntropyLoss_12"], [64, 1, 1, "", "OnnxSoftmaxCrossEntropyLoss_13"], [64, 1, 1, "", "OnnxSoftmax_1"], [64, 1, 1, "", "OnnxSoftmax_11"], [64, 1, 1, "", "OnnxSoftmax_13"], [64, 1, 1, "", "OnnxSoftplus"], [64, 1, 1, "", "OnnxSoftplus_1"], [64, 1, 1, "", "OnnxSoftsign"], [64, 1, 1, "", "OnnxSoftsign_1"], [64, 1, 1, "", "OnnxSpaceToDepth"], [64, 1, 1, "", "OnnxSpaceToDepth_1"], [64, 1, 1, "", "OnnxSpaceToDepth_13"], [64, 1, 1, "", "OnnxSplit"], [64, 1, 1, "", "OnnxSplitToSequence"], [64, 1, 1, "", "OnnxSplitToSequence_11"], [64, 1, 1, "", "OnnxSplit_1"], [64, 1, 1, "", "OnnxSplit_11"], [64, 1, 1, "", "OnnxSplit_13"], [64, 1, 1, "", "OnnxSplit_18"], [64, 1, 1, "", "OnnxSplit_2"], [64, 1, 1, "", "OnnxSqrt"], [64, 1, 1, "", "OnnxSqrt_1"], [64, 1, 1, "", "OnnxSqrt_13"], [64, 1, 1, "", "OnnxSqrt_6"], [64, 1, 1, "", "OnnxSqueeze"], [64, 1, 1, "", "OnnxSqueeze_1"], [64, 1, 1, "", "OnnxSqueeze_11"], [64, 1, 1, "", "OnnxSqueeze_13"], [64, 1, 1, "", "OnnxStringNormalizer"], [64, 1, 1, "", "OnnxStringNormalizer_10"], [64, 1, 1, "", "OnnxSub"], [64, 1, 1, "", "OnnxSub_1"], [64, 1, 1, "", "OnnxSub_13"], [64, 1, 1, "", "OnnxSub_14"], [64, 1, 1, "", "OnnxSub_6"], [64, 1, 1, "", "OnnxSub_7"], [64, 1, 1, "", "OnnxSum"], [64, 1, 1, "", "OnnxSum_1"], [64, 1, 1, "", "OnnxSum_13"], [64, 1, 1, "", "OnnxSum_6"], [64, 1, 1, "", "OnnxSum_8"], [64, 1, 1, "", "OnnxTan"], [64, 1, 1, "", "OnnxTan_7"], [64, 1, 1, "", "OnnxTanh"], [64, 1, 1, "", "OnnxTanh_1"], [64, 1, 1, "", "OnnxTanh_13"], [64, 1, 1, "", "OnnxTanh_6"], [64, 1, 1, "", "OnnxTfIdfVectorizer"], [64, 1, 1, "", "OnnxTfIdfVectorizer_9"], [64, 1, 1, "", "OnnxThresholdedRelu"], [64, 1, 1, "", "OnnxThresholdedRelu_10"], [64, 1, 1, "", "OnnxTile"], [64, 1, 1, "", "OnnxTile_1"], [64, 1, 1, "", "OnnxTile_13"], [64, 1, 1, "", "OnnxTile_6"], [64, 1, 1, "", "OnnxTopK"], [64, 1, 1, "", "OnnxTopK_1"], [64, 1, 1, "", "OnnxTopK_10"], [64, 1, 1, "", "OnnxTopK_11"], [64, 1, 1, "", "OnnxTranspose"], [64, 1, 1, "", "OnnxTranspose_1"], [64, 1, 1, "", "OnnxTranspose_13"], [64, 
1, 1, "", "OnnxTreeEnsembleClassifier"], [64, 1, 1, "", "OnnxTreeEnsembleClassifier_1"], [64, 1, 1, "", "OnnxTreeEnsembleClassifier_3"], [64, 1, 1, "", "OnnxTreeEnsembleRegressor"], [64, 1, 1, "", "OnnxTreeEnsembleRegressor_1"], [64, 1, 1, "", "OnnxTreeEnsembleRegressor_3"], [64, 1, 1, "", "OnnxTrilu"], [64, 1, 1, "", "OnnxTrilu_14"], [64, 1, 1, "", "OnnxUnique"], [64, 1, 1, "", "OnnxUnique_11"], [64, 1, 1, "", "OnnxUnsqueeze"], [64, 1, 1, "", "OnnxUnsqueeze_1"], [64, 1, 1, "", "OnnxUnsqueeze_11"], [64, 1, 1, "", "OnnxUnsqueeze_13"], [64, 1, 1, "", "OnnxUpsample"], [64, 1, 1, "", "OnnxUpsample_10"], [64, 1, 1, "", "OnnxUpsample_7"], [64, 1, 1, "", "OnnxUpsample_9"], [64, 1, 1, "", "OnnxWhere"], [64, 1, 1, "", "OnnxWhere_16"], [64, 1, 1, "", "OnnxWhere_9"], [64, 1, 1, "", "OnnxXor"], [64, 1, 1, "", "OnnxXor_1"], [64, 1, 1, "", "OnnxXor_7"], [64, 1, 1, "", "OnnxZipMap"], [64, 1, 1, "", "OnnxZipMap_1"]], "skl2onnx.algebra.sklearn_ops": [[64, 1, 1, "", "OnnxCastRegressor"], [64, 1, 1, "", "OnnxCastTransformer"], [64, 1, 1, "", "OnnxReplaceTransformer"], [64, 1, 1, "", "OnnxSklearnARDRegression"], [64, 1, 1, "", "OnnxSklearnAdaBoostClassifier"], [64, 1, 1, "", "OnnxSklearnAdaBoostRegressor"], [64, 1, 1, "", "OnnxSklearnBaggingClassifier"], [64, 1, 1, "", "OnnxSklearnBaggingRegressor"], [64, 1, 1, "", "OnnxSklearnBayesianGaussianMixture"], [64, 1, 1, "", "OnnxSklearnBayesianRidge"], [64, 1, 1, "", "OnnxSklearnBernoulliNB"], [64, 1, 1, "", "OnnxSklearnBinarizer"], [64, 1, 1, "", "OnnxSklearnCalibratedClassifierCV"], [64, 1, 1, "", "OnnxSklearnCategoricalNB"], [64, 1, 1, "id1", "OnnxSklearnColumnTransformer"], [64, 1, 1, "", "OnnxSklearnComplementNB"], [64, 1, 1, "", "OnnxSklearnCountVectorizer"], [64, 1, 1, "", "OnnxSklearnDecisionTreeClassifier"], [64, 1, 1, "", "OnnxSklearnDecisionTreeRegressor"], [64, 1, 1, "", "OnnxSklearnDictVectorizer"], [64, 1, 1, "", "OnnxSklearnElasticNet"], [64, 1, 1, "", "OnnxSklearnElasticNetCV"], [64, 1, 1, "", "OnnxSklearnExtraTreeClassifier"], [64, 1, 1, "", "OnnxSklearnExtraTreeRegressor"], [64, 1, 1, "", "OnnxSklearnExtraTreesClassifier"], [64, 1, 1, "", "OnnxSklearnExtraTreesRegressor"], [64, 1, 1, "", "OnnxSklearnFeatureHasher"], [64, 1, 1, "id2", "OnnxSklearnFeatureUnion"], [64, 1, 1, "", "OnnxSklearnFunctionTransformer"], [64, 1, 1, "", "OnnxSklearnGammaRegressor"], [64, 1, 1, "", "OnnxSklearnGaussianMixture"], [64, 1, 1, "", "OnnxSklearnGaussianNB"], [64, 1, 1, "", "OnnxSklearnGaussianProcessClassifier"], [64, 1, 1, "", "OnnxSklearnGaussianProcessRegressor"], [64, 1, 1, "", "OnnxSklearnGaussianRandomProjection"], [64, 1, 1, "", "OnnxSklearnGenericUnivariateSelect"], [64, 1, 1, "", "OnnxSklearnGradientBoostingClassifier"], [64, 1, 1, "", "OnnxSklearnGradientBoostingRegressor"], [64, 1, 1, "", "OnnxSklearnGridSearchCV"], [64, 1, 1, "", "OnnxSklearnHistGradientBoostingClassifier"], [64, 1, 1, "", "OnnxSklearnHistGradientBoostingRegressor"], [64, 1, 1, "", "OnnxSklearnHuberRegressor"], [64, 1, 1, "", "OnnxSklearnIncrementalPCA"], [64, 1, 1, "", "OnnxSklearnIsolationForest"], [64, 1, 1, "", "OnnxSklearnKBinsDiscretizer"], [64, 1, 1, "", "OnnxSklearnKMeans"], [64, 1, 1, "", "OnnxSklearnKNNImputer"], [64, 1, 1, "", "OnnxSklearnKNeighborsClassifier"], [64, 1, 1, "", "OnnxSklearnKNeighborsRegressor"], [64, 1, 1, "", "OnnxSklearnKNeighborsTransformer"], [64, 1, 1, "", "OnnxSklearnKernelCenterer"], [64, 1, 1, "", "OnnxSklearnKernelPCA"], [64, 1, 1, "", "OnnxSklearnLabelBinarizer"], [64, 1, 1, "", "OnnxSklearnLabelEncoder"], [64, 1, 1, "", "OnnxSklearnLars"], [64, 1, 1, 
"", "OnnxSklearnLarsCV"], [64, 1, 1, "", "OnnxSklearnLasso"], [64, 1, 1, "", "OnnxSklearnLassoCV"], [64, 1, 1, "", "OnnxSklearnLassoLars"], [64, 1, 1, "", "OnnxSklearnLassoLarsCV"], [64, 1, 1, "", "OnnxSklearnLassoLarsIC"], [64, 1, 1, "", "OnnxSklearnLinearDiscriminantAnalysis"], [64, 1, 1, "", "OnnxSklearnLinearRegression"], [64, 1, 1, "", "OnnxSklearnLinearSVC"], [64, 1, 1, "", "OnnxSklearnLinearSVR"], [64, 1, 1, "", "OnnxSklearnLocalOutlierFactor"], [64, 1, 1, "", "OnnxSklearnLogisticRegression"], [64, 1, 1, "", "OnnxSklearnLogisticRegressionCV"], [64, 1, 1, "", "OnnxSklearnMLPClassifier"], [64, 1, 1, "", "OnnxSklearnMLPRegressor"], [64, 1, 1, "", "OnnxSklearnMaxAbsScaler"], [64, 1, 1, "", "OnnxSklearnMinMaxScaler"], [64, 1, 1, "", "OnnxSklearnMiniBatchKMeans"], [64, 1, 1, "", "OnnxSklearnMultiOutputClassifier"], [64, 1, 1, "", "OnnxSklearnMultiOutputRegressor"], [64, 1, 1, "", "OnnxSklearnMultiTaskElasticNet"], [64, 1, 1, "", "OnnxSklearnMultiTaskElasticNetCV"], [64, 1, 1, "", "OnnxSklearnMultiTaskLasso"], [64, 1, 1, "", "OnnxSklearnMultiTaskLassoCV"], [64, 1, 1, "", "OnnxSklearnMultinomialNB"], [64, 1, 1, "", "OnnxSklearnNearestNeighbors"], [64, 1, 1, "", "OnnxSklearnNeighborhoodComponentsAnalysis"], [64, 1, 1, "", "OnnxSklearnNormalizer"], [64, 1, 1, "", "OnnxSklearnNuSVC"], [64, 1, 1, "", "OnnxSklearnNuSVR"], [64, 1, 1, "", "OnnxSklearnOneClassSVM"], [64, 1, 1, "", "OnnxSklearnOneHotEncoder"], [64, 1, 1, "", "OnnxSklearnOneVsOneClassifier"], [64, 1, 1, "", "OnnxSklearnOneVsRestClassifier"], [64, 1, 1, "", "OnnxSklearnOrdinalEncoder"], [64, 1, 1, "", "OnnxSklearnOrthogonalMatchingPursuit"], [64, 1, 1, "", "OnnxSklearnOrthogonalMatchingPursuitCV"], [64, 1, 1, "", "OnnxSklearnPCA"], [64, 1, 1, "", "OnnxSklearnPLSRegression"], [64, 1, 1, "", "OnnxSklearnPassiveAggressiveClassifier"], [64, 1, 1, "", "OnnxSklearnPassiveAggressiveRegressor"], [64, 1, 1, "", "OnnxSklearnPerceptron"], [64, 1, 1, "id0", "OnnxSklearnPipeline"], [64, 1, 1, "", "OnnxSklearnPoissonRegressor"], [64, 1, 1, "", "OnnxSklearnPolynomialFeatures"], [64, 1, 1, "", "OnnxSklearnPowerTransformer"], [64, 1, 1, "", "OnnxSklearnQuadraticDiscriminantAnalysis"], [64, 1, 1, "", "OnnxSklearnQuantileRegressor"], [64, 1, 1, "", "OnnxSklearnRANSACRegressor"], [64, 1, 1, "", "OnnxSklearnRFE"], [64, 1, 1, "", "OnnxSklearnRFECV"], [64, 1, 1, "", "OnnxSklearnRadiusNeighborsClassifier"], [64, 1, 1, "", "OnnxSklearnRadiusNeighborsRegressor"], [64, 1, 1, "", "OnnxSklearnRandomForestClassifier"], [64, 1, 1, "", "OnnxSklearnRandomForestRegressor"], [64, 1, 1, "", "OnnxSklearnRandomTreesEmbedding"], [64, 1, 1, "", "OnnxSklearnRidge"], [64, 1, 1, "", "OnnxSklearnRidgeCV"], [64, 1, 1, "", "OnnxSklearnRidgeClassifier"], [64, 1, 1, "", "OnnxSklearnRidgeClassifierCV"], [64, 1, 1, "", "OnnxSklearnRobustScaler"], [64, 1, 1, "", "OnnxSklearnSGDClassifier"], [64, 1, 1, "", "OnnxSklearnSGDOneClassSVM"], [64, 1, 1, "", "OnnxSklearnSGDRegressor"], [64, 1, 1, "", "OnnxSklearnSVC"], [64, 1, 1, "", "OnnxSklearnSVR"], [64, 1, 1, "", "OnnxSklearnSelectFdr"], [64, 1, 1, "", "OnnxSklearnSelectFpr"], [64, 1, 1, "", "OnnxSklearnSelectFromModel"], [64, 1, 1, "", "OnnxSklearnSelectFwe"], [64, 1, 1, "", "OnnxSklearnSelectKBest"], [64, 1, 1, "", "OnnxSklearnSelectPercentile"], [64, 1, 1, "", "OnnxSklearnSimpleImputer"], [64, 1, 1, "", "OnnxSklearnStackingClassifier"], [64, 1, 1, "", "OnnxSklearnStackingRegressor"], [64, 1, 1, "", "OnnxSklearnStandardScaler"], [64, 1, 1, "", "OnnxSklearnTfidfTransformer"], [64, 1, 1, "", "OnnxSklearnTfidfVectorizer"], [64, 1, 1, "", 
"OnnxSklearnTheilSenRegressor"], [64, 1, 1, "", "OnnxSklearnTruncatedSVD"], [64, 1, 1, "", "OnnxSklearnTweedieRegressor"], [64, 1, 1, "", "OnnxSklearnVarianceThreshold"], [64, 1, 1, "", "OnnxSklearnVotingClassifier"], [64, 1, 1, "", "OnnxSklearnVotingRegressor"]], "skl2onnx.algebra.sklearn_ops.OnnxSklearnColumnTransformer": [[64, 2, 1, "", "onnx_converter"], [64, 2, 1, "", "onnx_parser"], [64, 2, 1, "", "onnx_shape_calculator"], [64, 2, 1, "", "to_onnx"], [64, 2, 1, "", "to_onnx_operator"]], "skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureUnion": [[64, 2, 1, "", "onnx_converter"], [64, 2, 1, "", "onnx_parser"], [64, 2, 1, "", "onnx_shape_calculator"], [64, 2, 1, "", "to_onnx"], [64, 2, 1, "", "to_onnx_operator"]], "skl2onnx.algebra.sklearn_ops.OnnxSklearnPipeline": [[64, 2, 1, "", "onnx_converter"], [64, 2, 1, "", "onnx_parser"], [64, 2, 1, "", "onnx_shape_calculator"], [64, 2, 1, "", "to_onnx"], [64, 2, 1, "", "to_onnx_operator"]], "skl2onnx.common._container": [[0, 1, 1, "", "ModelComponentContainer"], [0, 1, 1, "", "SklearnModelContainerNode"]], "skl2onnx.common._container.ModelComponentContainer": [[0, 2, 1, "", "add_initializer"], [0, 2, 1, "", "add_input"], [0, 2, 1, "", "add_node"], [0, 2, 1, "", "add_output"]], "skl2onnx.common._container.SklearnModelContainerNode": [[0, 3, 1, "", "input_names"], [0, 3, 1, "", "output_names"]], "skl2onnx.common._topology": [[0, 1, 1, "", "Operator"], [0, 1, 1, "", "Scope"], [0, 1, 1, "", "Topology"], [0, 1, 1, "", "Variable"], [0, 0, 1, "", "convert_topology"]], "skl2onnx.common._topology.Scope": [[0, 2, 1, "", "get_unique_operator_name"], [0, 2, 1, "", "get_unique_variable_name"]], "skl2onnx.common.utils": [[0, 0, 1, "", "check_input_and_output_numbers"], [0, 0, 1, "", "check_input_and_output_types"]], "skl2onnx": [[0, 0, 1, "", "convert_sklearn"], [0, 0, 1, "", "get_latest_tested_opset_version"], [0, 0, 1, "", "supported_converters"], [0, 0, 1, "", "to_onnx"], [0, 0, 1, "", "update_registered_converter"], [0, 0, 1, "", "update_registered_parser"]], "skl2onnx.helpers.onnx_helper": [[0, 0, 1, "", "enumerate_model_node_outputs"], [0, 0, 1, "", "load_onnx_model"], [0, 0, 1, "", "save_onnx_model"], [0, 0, 1, "", "select_model_inputs_outputs"]], "skl2onnx.operator_converters.text_vectoriser": [[62, 0, 1, "", "convert_sklearn_text_vectorizer"]]}, "objtypes": {"0": "py:function", "1": "py:class", "2": "py:method", "3": "py:property"}, "objnames": {"0": ["py", "function", "Python function"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"], "3": ["py", "property", "Python property"]}, "titleterms": {"api": [0, 22], "summari": 0, "version": 0, "convert": [0, 5, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 19, 24, 25, 29, 31, 34, 35, 38, 42, 43, 44, 45, 46, 47, 48, 49, 51, 52, 56, 57, 59, 61, 62, 63, 64, 65, 67, 68], "option": [0, 33, 35, 49, 62], "log": [0, 19], "regist": [0, 24, 25, 42, 43, 44, 45], "new": [0, 6, 12, 46, 48, 49, 50, 52, 63], "manipul": 0, "onnx": [0, 2, 3, 4, 7, 9, 10, 12, 13, 14, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 29, 30, 32, 38, 39, 40, 41, 46, 47, 48, 49, 50, 52, 54, 57, 59, 61, 64, 69], "graph": [0, 3, 7, 10, 12, 13, 14, 17, 22, 24, 25, 26, 37, 38, 39, 40, 41, 42, 43, 45, 46, 48, 50, 52, 57], "parser": [0, 13, 14, 50, 63], "util": 0, "contributor": 0, "concept": 0, "contain": 0, "node": [0, 37], "scope": 0, "topologi": 0, "galleri": 1, "exampl": [1, 6, 22, 28, 31, 41, 45, 51, 52, 57, 63], "runtim": [2, 9, 29, 30, 37, 52, 53, 61, 69], "backend": 2, "compar": [3, 4, 7, 11, 17, 24, 25, 35, 42, 43, 45], "cdist": 
3, "scipi": 3, "onnxruntim": [3, 10, 15, 53], "benchmark": [3, 4, 30], "pipelin": [4, 6, 7, 10, 17, 18, 23, 24, 25, 26, 29, 30, 33, 36, 37, 40, 42, 43, 44, 45, 62, 63, 64], "creat": [4, 7, 17, 18], "convers": [4, 5, 12, 13, 14, 18, 19, 21, 26, 30, 33, 34, 39, 46, 47, 48, 49, 50, 54, 57, 63], "output": [4, 8, 11, 17, 35, 38, 41, 50], "intermedi": [4, 17, 18, 37, 41], "step": [4, 18, 37, 61], "model": [5, 8, 9, 10, 11, 12, 13, 14, 16, 19, 21, 23, 24, 25, 29, 33, 35, 38, 40, 45, 46, 47, 48, 49, 56, 59, 61, 64, 68], "reduc": 5, "list": [5, 33, 34], "oper": [5, 21, 22, 34, 52, 64], "gaussianmixtur": [5, 34], "default": [5, 34, 35], "without": [5, 11, 34], "reducelogsumexp": [5, 34], "process": [5, 30, 34, 44], "time": [5, 11, 27, 30, 34, 35, 44, 49, 58], "If": [5, 34], "cannot": [5, 34], "discrep": [6, 16, 36, 44, 54, 56, 63], "standardscal": 6, "an": [6, 39, 40], "fail": 6, "columntransform": 7, "train": [7, 8, 9, 11, 16, 17, 19, 24, 25, 26, 29, 30, 35, 38, 42, 43, 44, 45, 55, 56, 61], "complex": [7, 17, 63], "defin": [7, 17], "input": [7, 17, 39, 41], "predict": [7, 9, 10, 11, 17, 24, 25, 29, 35, 39, 42, 43, 45], "displai": [7, 10, 12, 13, 14, 17, 22, 24, 25, 26], "probabl": [8, 11], "raw": [8, 62], "score": [8, 13, 14, 62], "type": [8, 11], "decision_funct": 8, "comput": [9, 17, 27, 58], "differ": 10, "wai": [10, 47], "simpl": [10, 21, 31, 51, 57], "kmean": 10, "custom": [10, 13, 14, 21, 31, 46, 47, 48, 49, 56, 68], "object": 10, "vector": 11, "zipmap": [11, 33, 35, 62], "One": [11, 33, 57], "per": 11, "class": [11, 62], "let": [11, 35], "": [11, 35], "write": [12, 67], "your": [12, 59, 61], "own": 12, "implement": [12, 46, 47, 48], "transform": [12, 50, 52], "experiment": 12, "mnist": 12, "repeat": 12, "t": 12, "sne": 12, "shape_calcul": 12, "when": [13, 14, 34, 36, 51], "i": [13, 14, 32], "neither": [13, 14], "classifi": [13, 14, 24, 25, 35, 40, 42, 43, 45, 62], "nor": [13, 14], "regressor": [13, 14, 44], "iri": [13, 14], "final": [13, 14, 37, 42, 43, 45, 46, 48, 50, 52], "test": [13, 14], "altern": 14, "error": 15, "gaussianprocessorregressor": 16, "us": [16, 48, 61, 65], "doubl": 16, "first": 16, "attempt": 16, "second": 16, "variabl": [16, 52], "dimens": 16, "third": 16, "size": 16, "increas": 16, "return_std": 16, "true": [16, 35], "walk": 17, "through": 17, "sub": 17, "investig": [18, 37, 63], "pickl": 18, "verbos": 19, "paramet": [19, 38], "metadata": 20, "nmf": 21, "decomposit": 21, "build": 21, "plai": 22, "python": [22, 29, 37, 52], "same": [22, 45], "sklearn": [22, 59], "multipl": 22, "draw": 23, "retriev": [23, 40], "json": 23, "format": [23, 61], "lightgbm": [24, 43, 44], "lgbmclassifi": [24, 43, 44], "again": [24, 25, 43, 45], "xgboost": [25, 45], "xgbclassifi": [25, 45], "tfidfvector": [26, 51, 54, 62], "deploi": 29, "scikit": [29, 59, 64], "learn": [29, 40, 59, 64], "comparison": [29, 49], "measur": 30, "woeencod": 31, "from": [31, 65], "categorical_encod": 31, "A": [31, 39, 49, 50, 51, 52, 57, 68], "ordinalencod": 31, "what": 32, "opset": 32, "number": [32, 50], "data": [32, 36, 55], "other": [32, 48, 65, 67], "mani": 33, "possibl": 33, "raw_scor": 33, "decision_path": 33, "avail": [33, 64], "black": 34, "choos": 35, "appropri": 35, "behaviour": 35, "fals": 35, "column": 35, "zimpap": 35, "output_class_label": 35, "multioutputclassifi": 35, "issu": [36, 51], "switch": 36, "float": 36, "more": [36, 40], "The": [36, 66], "casttransform": 36, "sledgehamm": 36, "result": [37, 41], "look": 37, "everi": 37, "store": 38, "arrai": 38, "one": [38, 40], "add": 38, "infer": 38, 
"select": 38, "save": 38, "load": [38, 40, 61], "datafram": 39, "dataset": [39, 54, 55], "categori": 39, "unhid": 39, "logic": 39, "transfer": 40, "imag": 40, "pca": 40, "remov": 40, "layer": 40, "end": 40, "modifi": 41, "basic": 41, "chang": [41, 50], "name": 41, "renam": 41, "catboost": 42, "catboostclassifi": 42, "lgbmregressor": 44, "split": 44, "influenc": 44, "xgbregressor": 45, "booster": 45, "two": [47, 50], "ad": 50, "tricki": 51, "countvector": [51, 62], "imposs": 51, "traceabletfidfvector": 51, "fast": [52, 53], "design": 52, "which": 52, "decorrel": 52, "extend": [52, 69], "shape": [52, 63], "calcul": [52, 63], "eig": 52, "registr": 52, "deal": 54, "tf": 54, "idf": 54, "import": [54, 55], "setup": [54, 55], "artifici": [54, 55], "fit": 54, "execut": 54, "tfidf": 55, "spars": 55, "matric": 55, "ensembl": 55, "after": 55, "dens": 55, "nan": 55, "0": 55, "replac": 55, "conclus": 55, "pyod": 56, "iforest": 56, "check": 56, "woe": 57, "hot": 57, "half": 57, "line": 57, "tutori": 60, "introduct": 61, "quick": 61, "start": 61, "1": 61, "favorit": 61, "framework": 61, "2": 61, "export": 61, "3": 61, "run": 61, "convert_sklearn": 61, "to_onnx": 61, "initial_typ": 61, "gaussianprocessregressor": 62, "nearestneighbor": 62, "addit": 62, "inform": 62, "pickabl": 62, "titan": 63, "parameter": 63, "miss": 63, "support": 64, "cover": 64, "document": 64, "onnxcastregressor": 64, "onnxcasttransform": 64, "onnxordinalencod": 64, "onnxreplacetransform": 64, "onnxsklearnardregress": 64, "onnxsklearnadaboostclassifi": 64, "onnxsklearnadaboostregressor": 64, "onnxsklearnbaggingclassifi": 64, "onnxsklearnbaggingregressor": 64, "onnxsklearnbayesiangaussianmixtur": 64, "onnxsklearnbayesianridg": 64, "onnxsklearnbernoullinb": 64, "onnxsklearnbinar": 64, "onnxsklearncalibratedclassifiercv": 64, "onnxsklearncategoricalnb": 64, "onnxsklearncolumntransform": 64, "onnxsklearncomplementnb": 64, "onnxsklearncountvector": 64, "onnxsklearndecisiontreeclassifi": 64, "onnxsklearndecisiontreeregressor": 64, "onnxsklearndictvector": 64, "onnxsklearnelasticnet": 64, "onnxsklearnelasticnetcv": 64, "onnxsklearnextratreeclassifi": 64, "onnxsklearnextratreeregressor": 64, "onnxsklearnextratreesclassifi": 64, "onnxsklearnextratreesregressor": 64, "onnxsklearnfeaturehash": 64, "onnxsklearnfeatureunion": 64, "onnxsklearnfunctiontransform": 64, "onnxsklearngammaregressor": 64, "onnxsklearngaussianmixtur": 64, "onnxsklearngaussiannb": 64, "onnxsklearngaussianprocessclassifi": 64, "onnxsklearngaussianprocessregressor": 64, "onnxsklearngaussianrandomproject": 64, "onnxsklearngenericunivariateselect": 64, "onnxsklearngradientboostingclassifi": 64, "onnxsklearngradientboostingregressor": 64, "onnxsklearngridsearchcv": 64, "onnxsklearnhistgradientboostingclassifi": 64, "onnxsklearnhistgradientboostingregressor": 64, "onnxsklearnhuberregressor": 64, "onnxsklearnincrementalpca": 64, "onnxsklearnisolationforest": 64, "onnxsklearnkbinsdiscret": 64, "onnxsklearnkmean": 64, "onnxsklearnknnimput": 64, "onnxsklearnkneighborsclassifi": 64, "onnxsklearnkneighborsregressor": 64, "onnxsklearnkneighborstransform": 64, "onnxsklearnkernelcenter": 64, "onnxsklearnkernelpca": 64, "onnxsklearnlabelbinar": 64, "onnxsklearnlabelencod": 64, "onnxsklearnlar": 64, "onnxsklearnlarscv": 64, "onnxsklearnlasso": 64, "onnxsklearnlassocv": 64, "onnxsklearnlassolar": 64, "onnxsklearnlassolarscv": 64, "onnxsklearnlassolars": 64, "onnxsklearnlineardiscriminantanalysi": 64, "onnxsklearnlinearregress": 64, "onnxsklearnlinearsvc": 64, "onnxsklearnlinearsvr": 64, 
"onnxsklearnlocaloutlierfactor": 64, "onnxsklearnlogisticregress": 64, "onnxsklearnlogisticregressioncv": 64, "onnxsklearnmlpclassifi": 64, "onnxsklearnmlpregressor": 64, "onnxsklearnmaxabsscal": 64, "onnxsklearnminmaxscal": 64, "onnxsklearnminibatchkmean": 64, "onnxsklearnmultioutputclassifi": 64, "onnxsklearnmultioutputregressor": 64, "onnxsklearnmultitaskelasticnet": 64, "onnxsklearnmultitaskelasticnetcv": 64, "onnxsklearnmultitasklasso": 64, "onnxsklearnmultitasklassocv": 64, "onnxsklearnmultinomialnb": 64, "onnxsklearnnearestneighbor": 64, "onnxsklearnneighborhoodcomponentsanalysi": 64, "onnxsklearnnorm": 64, "onnxsklearnnusvc": 64, "onnxsklearnnusvr": 64, "onnxsklearnoneclasssvm": 64, "onnxsklearnonehotencod": 64, "onnxsklearnonevsoneclassifi": 64, "onnxsklearnonevsrestclassifi": 64, "onnxsklearnordinalencod": 64, "onnxsklearnorthogonalmatchingpursuit": 64, "onnxsklearnorthogonalmatchingpursuitcv": 64, "onnxsklearnpca": 64, "onnxsklearnplsregress": 64, "onnxsklearnpassiveaggressiveclassifi": 64, "onnxsklearnpassiveaggressiveregressor": 64, "onnxsklearnperceptron": 64, "onnxsklearnpipelin": 64, "onnxsklearnpoissonregressor": 64, "onnxsklearnpolynomialfeatur": 64, "onnxsklearnpowertransform": 64, "onnxsklearnquadraticdiscriminantanalysi": 64, "onnxsklearnquantileregressor": 64, "onnxsklearnransacregressor": 64, "onnxsklearnrf": 64, "onnxsklearnrfecv": 64, "onnxsklearnradiusneighborsclassifi": 64, "onnxsklearnradiusneighborsregressor": 64, "onnxsklearnrandomforestclassifi": 64, "onnxsklearnrandomforestregressor": 64, "onnxsklearnrandomtreesembed": 64, "onnxsklearnridg": 64, "onnxsklearnridgecv": 64, "onnxsklearnridgeclassifi": 64, "onnxsklearnridgeclassifiercv": 64, "onnxsklearnrobustscal": 64, "onnxsklearnsgdclassifi": 64, "onnxsklearnsgdoneclasssvm": 64, "onnxsklearnsgdregressor": 64, "onnxsklearnsvc": 64, "onnxsklearnsvr": 64, "onnxsklearnselectfdr": 64, "onnxsklearnselectfpr": 64, "onnxsklearnselectfrommodel": 64, "onnxsklearnselectfw": 64, "onnxsklearnselectkbest": 64, "onnxsklearnselectpercentil": 64, "onnxsklearnsimpleimput": 64, "onnxsklearnstackingclassifi": 64, "onnxsklearnstackingregressor": 64, "onnxsklearnstandardscal": 64, "onnxsklearntfidftransform": 64, "onnxsklearntfidfvector": 64, "onnxsklearntheilsenregressor": 64, "onnxsklearntraceablecountvector": 64, "onnxsklearntraceabletfidfvector": 64, "onnxsklearntruncatedsvd": 64, "onnxsklearntweedieregressor": 64, "onnxsklearnvariancethreshold": 64, "onnxsklearnvotingclassifi": 64, "onnxsklearnvotingregressor": 64, "onnxwoeencod": 64, "onnxwoetransform": 64, "onnxab": 64, "onnxabs_1": 64, "onnxabs_13": 64, "onnxabs_6": 64, "onnxaco": 64, "onnxacos_7": 64, "onnxacosh": 64, "onnxacosh_9": 64, "onnxadagrad": 64, "onnxadagrad_1": 64, "onnxadam": 64, "onnxadam_1": 64, "onnxadd": 64, "onnxadd_1": 64, "onnxadd_13": 64, "onnxadd_14": 64, "onnxadd_6": 64, "onnxadd_7": 64, "onnxand": 64, "onnxand_1": 64, "onnxand_7": 64, "onnxargmax": 64, "onnxargmax_1": 64, "onnxargmax_11": 64, "onnxargmax_12": 64, "onnxargmax_13": 64, "onnxargmin": 64, "onnxargmin_1": 64, "onnxargmin_11": 64, "onnxargmin_12": 64, "onnxargmin_13": 64, "onnxarrayfeatureextractor": 64, "onnxarrayfeatureextractor_1": 64, "onnxasin": 64, "onnxasin_7": 64, "onnxasinh": 64, "onnxasinh_9": 64, "onnxatan": 64, "onnxatan_7": 64, "onnxatanh": 64, "onnxatanh_9": 64, "onnxaveragepool": 64, "onnxaveragepool_1": 64, "onnxaveragepool_10": 64, "onnxaveragepool_11": 64, "onnxaveragepool_19": 64, "onnxaveragepool_7": 64, "onnxbatchnorm": 64, "onnxbatchnormalization_1": 64, 
"onnxbatchnormalization_14": 64, "onnxbatchnormalization_15": 64, "onnxbatchnormalization_6": 64, "onnxbatchnormalization_7": 64, "onnxbatchnormalization_9": 64, "onnxbernoulli": 64, "onnxbernoulli_15": 64, "onnxbinar": 64, "onnxbinarizer_1": 64, "onnxbitshift": 64, "onnxbitshift_11": 64, "onnxbitwiseand": 64, "onnxbitwiseand_18": 64, "onnxbitwisenot": 64, "onnxbitwisenot_18": 64, "onnxbitwiseor": 64, "onnxbitwiseor_18": 64, "onnxbitwisexor": 64, "onnxbitwisexor_18": 64, "onnxblackmanwindow": 64, "onnxblackmanwindow_17": 64, "onnxcast": 64, "onnxcastlik": 64, "onnxcastlike_15": 64, "onnxcastmap": 64, "onnxcastmap_1": 64, "onnxcast_1": 64, "onnxcast_13": 64, "onnxcast_6": 64, "onnxcast_9": 64, "onnxcategorymapp": 64, "onnxcategorymapper_1": 64, "onnxceil": 64, "onnxceil_1": 64, "onnxceil_13": 64, "onnxceil_6": 64, "onnxcelu": 64, "onnxcelu_12": 64, "onnxcentercroppad": 64, "onnxcentercroppad_18": 64, "onnxclip": 64, "onnxclip_1": 64, "onnxclip_11": 64, "onnxclip_12": 64, "onnxclip_13": 64, "onnxclip_6": 64, "onnxcol2im": 64, "onnxcol2im_18": 64, "onnxcompress": 64, "onnxcompress_11": 64, "onnxcompress_9": 64, "onnxconcat": 64, "onnxconcatfromsequ": 64, "onnxconcatfromsequence_11": 64, "onnxconcat_1": 64, "onnxconcat_11": 64, "onnxconcat_13": 64, "onnxconcat_4": 64, "onnxconst": 64, "onnxconstantofshap": 64, "onnxconstantofshape_9": 64, "onnxconstant_1": 64, "onnxconstant_11": 64, "onnxconstant_12": 64, "onnxconstant_13": 64, "onnxconstant_9": 64, "onnxconv": 64, "onnxconvinteg": 64, "onnxconvinteger_10": 64, "onnxconvtranspos": 64, "onnxconvtranspose_1": 64, "onnxconvtranspose_11": 64, "onnxconv_1": 64, "onnxconv_11": 64, "onnxco": 64, "onnxcos_7": 64, "onnxcosh": 64, "onnxcosh_9": 64, "onnxcumsum": 64, "onnxcumsum_11": 64, "onnxcumsum_14": 64, "onnxdft": 64, "onnxdft_17": 64, "onnxdepthtospac": 64, "onnxdepthtospace_1": 64, "onnxdepthtospace_11": 64, "onnxdepthtospace_13": 64, "onnxdequantizelinear": 64, "onnxdequantizelinear_10": 64, "onnxdequantizelinear_13": 64, "onnxdet": 64, "onnxdet_11": 64, "onnxdictvector": 64, "onnxdictvectorizer_1": 64, "onnxdiv": 64, "onnxdiv_1": 64, "onnxdiv_13": 64, "onnxdiv_14": 64, "onnxdiv_6": 64, "onnxdiv_7": 64, "onnxdropout": 64, "onnxdropout_1": 64, "onnxdropout_10": 64, "onnxdropout_12": 64, "onnxdropout_13": 64, "onnxdropout_6": 64, "onnxdropout_7": 64, "onnxdynamicquantizelinear": 64, "onnxdynamicquantizelinear_11": 64, "onnxeinsum": 64, "onnxeinsum_12": 64, "onnxelu": 64, "onnxelu_1": 64, "onnxelu_6": 64, "onnxequ": 64, "onnxequal_1": 64, "onnxequal_11": 64, "onnxequal_13": 64, "onnxequal_19": 64, "onnxequal_7": 64, "onnxerf": 64, "onnxerf_13": 64, "onnxerf_9": 64, "onnxexp": 64, "onnxexp_1": 64, "onnxexp_13": 64, "onnxexp_6": 64, "onnxexpand": 64, "onnxexpand_13": 64, "onnxexpand_8": 64, "onnxeyelik": 64, "onnxeyelike_9": 64, "onnxfeaturevector": 64, "onnxfeaturevectorizer_1": 64, "onnxflatten": 64, "onnxflatten_1": 64, "onnxflatten_11": 64, "onnxflatten_13": 64, "onnxflatten_9": 64, "onnxfloor": 64, "onnxfloor_1": 64, "onnxfloor_13": 64, "onnxfloor_6": 64, "onnxgru": 64, "onnxgru_1": 64, "onnxgru_14": 64, "onnxgru_3": 64, "onnxgru_7": 64, "onnxgath": 64, "onnxgatherel": 64, "onnxgatherelements_11": 64, "onnxgatherelements_13": 64, "onnxgathernd": 64, "onnxgathernd_11": 64, "onnxgathernd_12": 64, "onnxgathernd_13": 64, "onnxgather_1": 64, "onnxgather_11": 64, "onnxgather_13": 64, "onnxgemm": 64, "onnxgemm_1": 64, "onnxgemm_11": 64, "onnxgemm_13": 64, "onnxgemm_6": 64, "onnxgemm_7": 64, "onnxgemm_9": 64, "onnxglobalaveragepool": 64, 
"onnxglobalaveragepool_1": 64, "onnxgloballppool": 64, "onnxgloballppool_1": 64, "onnxgloballppool_2": 64, "onnxglobalmaxpool": 64, "onnxglobalmaxpool_1": 64, "onnxgradi": 64, "onnxgradient_1": 64, "onnxgreat": 64, "onnxgreaterorequ": 64, "onnxgreaterorequal_12": 64, "onnxgreaterorequal_16": 64, "onnxgreater_1": 64, "onnxgreater_13": 64, "onnxgreater_7": 64, "onnxgreater_9": 64, "onnxgridsampl": 64, "onnxgridsample_16": 64, "onnxgroupnorm": 64, "onnxgroupnormalization_18": 64, "onnxhammingwindow": 64, "onnxhammingwindow_17": 64, "onnxhannwindow": 64, "onnxhannwindow_17": 64, "onnxhardsigmoid": 64, "onnxhardsigmoid_1": 64, "onnxhardsigmoid_6": 64, "onnxhardswish": 64, "onnxhardswish_14": 64, "onnxhardmax": 64, "onnxhardmax_1": 64, "onnxhardmax_11": 64, "onnxhardmax_13": 64, "onnxident": 64, "onnxidentity_1": 64, "onnxidentity_13": 64, "onnxidentity_14": 64, "onnxidentity_16": 64, "onnxif": 64, "onnxif_1": 64, "onnxif_11": 64, "onnxif_13": 64, "onnxif_16": 64, "onnximput": 64, "onnximputer_1": 64, "onnxinstancenorm": 64, "onnxinstancenormalization_1": 64, "onnxinstancenormalization_6": 64, "onnxisinf": 64, "onnxisinf_10": 64, "onnxisnan": 64, "onnxisnan_13": 64, "onnxisnan_9": 64, "onnxlrn": 64, "onnxlrn_1": 64, "onnxlrn_13": 64, "onnxlstm": 64, "onnxlstm_1": 64, "onnxlstm_14": 64, "onnxlstm_7": 64, "onnxlabelencod": 64, "onnxlabelencoder_1": 64, "onnxlabelencoder_2": 64, "onnxlayernorm": 64, "onnxlayernormalization_17": 64, "onnxleakyrelu": 64, "onnxleakyrelu_1": 64, "onnxleakyrelu_16": 64, "onnxleakyrelu_6": 64, "onnxless": 64, "onnxlessorequ": 64, "onnxlessorequal_12": 64, "onnxlessorequal_16": 64, "onnxless_1": 64, "onnxless_13": 64, "onnxless_7": 64, "onnxless_9": 64, "onnxlinearclassifi": 64, "onnxlinearclassifier_1": 64, "onnxlinearregressor": 64, "onnxlinearregressor_1": 64, "onnxlog": 64, "onnxlogsoftmax": 64, "onnxlogsoftmax_1": 64, "onnxlogsoftmax_11": 64, "onnxlogsoftmax_13": 64, "onnxlog_1": 64, "onnxlog_13": 64, "onnxlog_6": 64, "onnxloop": 64, "onnxloop_1": 64, "onnxloop_11": 64, "onnxloop_13": 64, "onnxloop_16": 64, "onnxlpnorm": 64, "onnxlpnormalization_1": 64, "onnxlppool": 64, "onnxlppool_1": 64, "onnxlppool_11": 64, "onnxlppool_18": 64, "onnxlppool_2": 64, "onnxmatmul": 64, "onnxmatmulinteg": 64, "onnxmatmulinteger_10": 64, "onnxmatmul_1": 64, "onnxmatmul_13": 64, "onnxmatmul_9": 64, "onnxmax": 64, "onnxmaxpool": 64, "onnxmaxpool_1": 64, "onnxmaxpool_10": 64, "onnxmaxpool_11": 64, "onnxmaxpool_12": 64, "onnxmaxpool_8": 64, "onnxmaxroipool": 64, "onnxmaxroipool_1": 64, "onnxmaxunpool": 64, "onnxmaxunpool_11": 64, "onnxmaxunpool_9": 64, "onnxmax_1": 64, "onnxmax_12": 64, "onnxmax_13": 64, "onnxmax_6": 64, "onnxmax_8": 64, "onnxmean": 64, "onnxmeanvariancenorm": 64, "onnxmeanvariancenormalization_13": 64, "onnxmeanvariancenormalization_9": 64, "onnxmean_1": 64, "onnxmean_13": 64, "onnxmean_6": 64, "onnxmean_8": 64, "onnxmelweightmatrix": 64, "onnxmelweightmatrix_17": 64, "onnxmin": 64, "onnxmin_1": 64, "onnxmin_12": 64, "onnxmin_13": 64, "onnxmin_6": 64, "onnxmin_8": 64, "onnxmish": 64, "onnxmish_18": 64, "onnxmod": 64, "onnxmod_10": 64, "onnxmod_13": 64, "onnxmomentum": 64, "onnxmomentum_1": 64, "onnxmul": 64, "onnxmul_1": 64, "onnxmul_13": 64, "onnxmul_14": 64, "onnxmul_6": 64, "onnxmul_7": 64, "onnxmultinomi": 64, "onnxmultinomial_7": 64, "onnxneg": 64, "onnxneg_1": 64, "onnxneg_13": 64, "onnxneg_6": 64, "onnxnegativeloglikelihoodloss": 64, "onnxnegativeloglikelihoodloss_12": 64, "onnxnegativeloglikelihoodloss_13": 64, "onnxnonmaxsuppress": 64, "onnxnonmaxsuppression_10": 
64, "onnxnonmaxsuppression_11": 64, "onnxnonzero": 64, "onnxnonzero_13": 64, "onnxnonzero_9": 64, "onnxnorm": 64, "onnxnormalizer_1": 64, "onnxnot": 64, "onnxnot_1": 64, "onnxonehot": 64, "onnxonehotencod": 64, "onnxonehotencoder_1": 64, "onnxonehot_11": 64, "onnxonehot_9": 64, "onnxopt": 64, "onnxoptionalgetel": 64, "onnxoptionalgetelement_15": 64, "onnxoptionalgetelement_18": 64, "onnxoptionalhasel": 64, "onnxoptionalhaselement_15": 64, "onnxoptionalhaselement_18": 64, "onnxoptional_15": 64, "onnxor": 64, "onnxor_1": 64, "onnxor_7": 64, "onnxprelu": 64, "onnxprelu_1": 64, "onnxprelu_16": 64, "onnxprelu_6": 64, "onnxprelu_7": 64, "onnxprelu_9": 64, "onnxpad": 64, "onnxpad_1": 64, "onnxpad_11": 64, "onnxpad_13": 64, "onnxpad_18": 64, "onnxpad_19": 64, "onnxpad_2": 64, "onnxpow": 64, "onnxpow_1": 64, "onnxpow_12": 64, "onnxpow_13": 64, "onnxpow_15": 64, "onnxpow_7": 64, "onnxqlinearconv": 64, "onnxqlinearconv_10": 64, "onnxqlinearmatmul": 64, "onnxqlinearmatmul_10": 64, "onnxquantizelinear": 64, "onnxquantizelinear_10": 64, "onnxquantizelinear_13": 64, "onnxrnn": 64, "onnxrnn_1": 64, "onnxrnn_14": 64, "onnxrnn_7": 64, "onnxrandomnorm": 64, "onnxrandomnormallik": 64, "onnxrandomnormallike_1": 64, "onnxrandomnormal_1": 64, "onnxrandomuniform": 64, "onnxrandomuniformlik": 64, "onnxrandomuniformlike_1": 64, "onnxrandomuniform_1": 64, "onnxrang": 64, "onnxrange_11": 64, "onnxreciproc": 64, "onnxreciprocal_1": 64, "onnxreciprocal_13": 64, "onnxreciprocal_6": 64, "onnxreducel1": 64, "onnxreducel1_1": 64, "onnxreducel1_11": 64, "onnxreducel1_13": 64, "onnxreducel1_18": 64, "onnxreducel2": 64, "onnxreducel2_1": 64, "onnxreducel2_11": 64, "onnxreducel2_13": 64, "onnxreducel2_18": 64, "onnxreducelogsum": 64, "onnxreducelogsumexp": 64, "onnxreducelogsumexp_1": 64, "onnxreducelogsumexp_11": 64, "onnxreducelogsumexp_13": 64, "onnxreducelogsumexp_18": 64, "onnxreducelogsum_1": 64, "onnxreducelogsum_11": 64, "onnxreducelogsum_13": 64, "onnxreducelogsum_18": 64, "onnxreducemax": 64, "onnxreducemax_1": 64, "onnxreducemax_11": 64, "onnxreducemax_12": 64, "onnxreducemax_13": 64, "onnxreducemax_18": 64, "onnxreducemean": 64, "onnxreducemean_1": 64, "onnxreducemean_11": 64, "onnxreducemean_13": 64, "onnxreducemean_18": 64, "onnxreducemin": 64, "onnxreducemin_1": 64, "onnxreducemin_11": 64, "onnxreducemin_12": 64, "onnxreducemin_13": 64, "onnxreducemin_18": 64, "onnxreduceprod": 64, "onnxreduceprod_1": 64, "onnxreduceprod_11": 64, "onnxreduceprod_13": 64, "onnxreduceprod_18": 64, "onnxreducesum": 64, "onnxreducesumsquar": 64, "onnxreducesumsquare_1": 64, "onnxreducesumsquare_11": 64, "onnxreducesumsquare_13": 64, "onnxreducesumsquare_18": 64, "onnxreducesum_1": 64, "onnxreducesum_11": 64, "onnxreducesum_13": 64, "onnxrelu": 64, "onnxrelu_1": 64, "onnxrelu_13": 64, "onnxrelu_14": 64, "onnxrelu_6": 64, "onnxreshap": 64, "onnxreshape_1": 64, "onnxreshape_13": 64, "onnxreshape_14": 64, "onnxreshape_5": 64, "onnxres": 64, "onnxresize_10": 64, "onnxresize_11": 64, "onnxresize_13": 64, "onnxresize_18": 64, "onnxresize_19": 64, "onnxreversesequ": 64, "onnxreversesequence_10": 64, "onnxroialign": 64, "onnxroialign_10": 64, "onnxroialign_16": 64, "onnxround": 64, "onnxround_11": 64, "onnxstft": 64, "onnxstft_17": 64, "onnxsvmclassifi": 64, "onnxsvmclassifier_1": 64, "onnxsvmregressor": 64, "onnxsvmregressor_1": 64, "onnxscal": 64, "onnxscaler_1": 64, "onnxscan": 64, "onnxscan_11": 64, "onnxscan_16": 64, "onnxscan_8": 64, "onnxscan_9": 64, "onnxscatt": 64, "onnxscatterel": 64, "onnxscatterelements_11": 64, 
"onnxscatterelements_13": 64, "onnxscatterelements_16": 64, "onnxscatterelements_18": 64, "onnxscatternd": 64, "onnxscatternd_11": 64, "onnxscatternd_13": 64, "onnxscatternd_16": 64, "onnxscatternd_18": 64, "onnxscatter_11": 64, "onnxscatter_9": 64, "onnxselu": 64, "onnxselu_1": 64, "onnxselu_6": 64, "onnxsequenceat": 64, "onnxsequenceat_11": 64, "onnxsequenceconstruct": 64, "onnxsequenceconstruct_11": 64, "onnxsequenceempti": 64, "onnxsequenceempty_11": 64, "onnxsequenceeras": 64, "onnxsequenceerase_11": 64, "onnxsequenceinsert": 64, "onnxsequenceinsert_11": 64, "onnxsequencelength": 64, "onnxsequencelength_11": 64, "onnxsequencemap": 64, "onnxsequencemap_17": 64, "onnxshap": 64, "onnxshape_1": 64, "onnxshape_13": 64, "onnxshape_15": 64, "onnxshrink": 64, "onnxshrink_9": 64, "onnxsigmoid": 64, "onnxsigmoid_1": 64, "onnxsigmoid_13": 64, "onnxsigmoid_6": 64, "onnxsign": 64, "onnxsign_13": 64, "onnxsign_9": 64, "onnxsin": 64, "onnxsin_7": 64, "onnxsinh": 64, "onnxsinh_9": 64, "onnxsiz": 64, "onnxsize_1": 64, "onnxsize_13": 64, "onnxslic": 64, "onnxslice_1": 64, "onnxslice_10": 64, "onnxslice_11": 64, "onnxslice_13": 64, "onnxsoftmax": 64, "onnxsoftmaxcrossentropyloss": 64, "onnxsoftmaxcrossentropyloss_12": 64, "onnxsoftmaxcrossentropyloss_13": 64, "onnxsoftmax_1": 64, "onnxsoftmax_11": 64, "onnxsoftmax_13": 64, "onnxsoftplu": 64, "onnxsoftplus_1": 64, "onnxsoftsign": 64, "onnxsoftsign_1": 64, "onnxspacetodepth": 64, "onnxspacetodepth_1": 64, "onnxspacetodepth_13": 64, "onnxsplit": 64, "onnxsplittosequ": 64, "onnxsplittosequence_11": 64, "onnxsplit_1": 64, "onnxsplit_11": 64, "onnxsplit_13": 64, "onnxsplit_18": 64, "onnxsplit_2": 64, "onnxsqrt": 64, "onnxsqrt_1": 64, "onnxsqrt_13": 64, "onnxsqrt_6": 64, "onnxsqueez": 64, "onnxsqueeze_1": 64, "onnxsqueeze_11": 64, "onnxsqueeze_13": 64, "onnxstringnorm": 64, "onnxstringnormalizer_10": 64, "onnxsub": 64, "onnxsub_1": 64, "onnxsub_13": 64, "onnxsub_14": 64, "onnxsub_6": 64, "onnxsub_7": 64, "onnxsum": 64, "onnxsum_1": 64, "onnxsum_13": 64, "onnxsum_6": 64, "onnxsum_8": 64, "onnxtan": 64, "onnxtan_7": 64, "onnxtanh": 64, "onnxtanh_1": 64, "onnxtanh_13": 64, "onnxtanh_6": 64, "onnxtfidfvector": 64, "onnxtfidfvectorizer_9": 64, "onnxthresholdedrelu": 64, "onnxthresholdedrelu_10": 64, "onnxtil": 64, "onnxtile_1": 64, "onnxtile_13": 64, "onnxtile_6": 64, "onnxtopk": 64, "onnxtopk_1": 64, "onnxtopk_10": 64, "onnxtopk_11": 64, "onnxtranspos": 64, "onnxtranspose_1": 64, "onnxtranspose_13": 64, "onnxtreeensembleclassifi": 64, "onnxtreeensembleclassifier_1": 64, "onnxtreeensembleclassifier_3": 64, "onnxtreeensembleregressor": 64, "onnxtreeensembleregressor_1": 64, "onnxtreeensembleregressor_3": 64, "onnxtrilu": 64, "onnxtrilu_14": 64, "onnxuniqu": 64, "onnxunique_11": 64, "onnxunsqueez": 64, "onnxunsqueeze_1": 64, "onnxunsqueeze_11": 64, "onnxunsqueeze_13": 64, "onnxupsampl": 64, "onnxupsample_10": 64, "onnxupsample_7": 64, "onnxupsample_9": 64, "onnxwher": 64, "onnxwhere_16": 64, "onnxwhere_9": 64, "onnxxor": 64, "onnxxor_1": 64, "onnxxor_7": 64, "onnxzipmap": 64, "onnxzipmap_1": 64, "librari": [65, 67], "easi": 66, "case": 66, "advanc": 70, "scenario": 70, "onnxboost": 64, "onnxcatboostclassifi": 64, "onnxcustomscorertransform": 64, "onnxdecorrelatetransform": 64, "onnxiforest": 64, "onnxlivedecorrelatetransform": 64, "onnxmockwrappedlightgbmboosterclassifi": 64, "onnxpredictabletsn": 64, "onnxsklearnlgbmclassifi": 64, "onnxsklearnlgbmregressor": 64, "onnxsklearntransformedtargetregressor": 64, "onnxsklearnxgbclassifi": 64, "onnxsklearnxgbregressor": 64, 
"onnxtransfertransform": 64, "onnxvalidatorclassifi": 64, "onnxwrappedlightgbmboost": 64, "onnxwrappedlightgbmboosterclassifi": 64}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx.ext.viewcode": 1, "sphinx": 57}, "alltitles": {"ONNX Runtime Backend for ONNX": [[2, "onnx-runtime-backend-for-onnx"]], "Errors with onnxruntime": [[15, "errors-with-onnxruntime"]], "Metadata": [[20, "metadata"]], "Fast runtime with onnxruntime": [[53, "fast-runtime-with-onnxruntime"]], "sklearn-onnx: Convert your scikit-learn model into ONNX": [[59, "sklearn-onnx-convert-your-scikit-learn-model-into-onnx"]], "Tutorial": [[60, "tutorial"]], "Using converters from other libraries": [[65, "using-converters-from-other-libraries"]], "The easy case": [[66, "the-easy-case"]], "Write converters for other libraries": [[67, "write-converters-for-other-libraries"]], "A custom converter for a custom model": [[68, "a-custom-converter-for-a-custom-model"]], "Extend ONNX, extend runtime": [[69, "extend-onnx-extend-runtime"]], "Advanced scenarios": [[70, "advanced-scenarios"]], "API Summary": [[0, "api-summary"]], "Version": [[0, "version"]], "Converters": [[0, "converters"]], "Converters options": [[0, "converters-options"]], "Logging": [[0, "logging"]], "Register a new converter": [[0, "register-a-new-converter"]], "Manipulate ONNX graphs": [[0, "manipulate-onnx-graphs"]], "Parsers": [[0, "parsers"]], "Utils for contributors": [[0, "utils-for-contributors"]], "Concepts": [[0, "concepts"]], "Containers": [[0, "containers"]], "Nodes": [[0, "nodes"]], "Scope": [[0, "scope"]], "Topology": [[0, "topology"]], "Gallery of examples": [[1, "gallery-of-examples"]], "Compare CDist with scipy": [[3, "compare-cdist-with-scipy"]], "ONNX Graph with CDist": [[3, "onnx-graph-with-cdist"]], "CDist and onnxruntime": [[3, "cdist-and-onnxruntime"]], "Benchmark": [[3, "benchmark"]], "Benchmark a pipeline": [[4, "benchmark-a-pipeline"]], "Create a pipeline": [[4, "create-a-pipeline"], [18, "create-a-pipeline"]], "Conversion to ONNX": [[4, "conversion-to-onnx"], [12, "conversion-to-onnx"], [13, "conversion-to-onnx"], [14, "conversion-to-onnx"], [18, "conversion-to-onnx"], [39, "conversion-to-onnx"], [54, "conversion-to-onnx"], [57, "conversion-to-onnx"]], "Comparing outputs": [[4, "comparing-outputs"]], "Benchmarks": [[4, "benchmarks"]], "Intermediate steps": [[4, "intermediate-steps"], [18, "intermediate-steps"]], "Convert a model with a reduced list of operators": [[5, "convert-a-model-with-a-reduced-list-of-operators"]], "GaussianMixture": [[5, "gaussianmixture"], [34, "gaussianmixture"]], "Default conversion": [[5, "default-conversion"], [34, "default-conversion"]], "Conversion without ReduceLogSumExp": [[5, "conversion-without-reducelogsumexp"], [34, "conversion-without-reducelogsumexp"]], "Processing time": [[5, "processing-time"], [34, "processing-time"], [44, "processing-time"]], "If the converter cannot convert without\u2026": [[5, "if-the-converter-cannot-convert-without"], [34, "if-the-converter-cannot-convert-without"]], "Discrepencies with StandardScaler": [[6, "discrepencies-with-standardscaler"]], "An example with fails": [[6, "an-example-with-fails"]], "New pipeline": [[6, "new-pipeline"]], "Convert a pipeline with ColumnTransformer": [[7, 
"convert-a-pipeline-with-columntransformer"]], "Create and train a complex pipeline": [[7, "create-and-train-a-complex-pipeline"], [17, "create-and-train-a-complex-pipeline"]], "Define the inputs of the ONNX graph": [[7, "define-the-inputs-of-the-onnx-graph"], [17, "define-the-inputs-of-the-onnx-graph"]], "Convert the pipeline into ONNX": [[7, "convert-the-pipeline-into-onnx"], [17, "convert-the-pipeline-into-onnx"]], "Compare the predictions": [[7, "compare-the-predictions"], [17, "compare-the-predictions"], [24, "compare-the-predictions"], [25, "compare-the-predictions"], [42, "compare-the-predictions"], [43, "compare-the-predictions"], [45, "compare-the-predictions"]], "Display the ONNX graph": [[7, "display-the-onnx-graph"], [10, "display-the-onnx-graph"], [12, "display-the-onnx-graph"], [13, "display-the-onnx-graph"], [14, "display-the-onnx-graph"], [22, "display-the-onnx-graph"], [24, "display-the-onnx-graph"], [25, "display-the-onnx-graph"], [26, "display-the-onnx-graph"]], "Probabilities or raw scores": [[8, "probabilities-or-raw-scores"]], "Train a model and convert it": [[8, "train-a-model-and-convert-it"], [11, "train-a-model-and-convert-it"], [35, "train-a-model-and-convert-it"]], "Output type": [[8, "output-type"], [11, "output-type"]], "Raw scores and decision_function": [[8, "raw-scores-and-decision-function"]], "Train, convert and predict a model": [[9, "train-convert-and-predict-a-model"]], "Train a model": [[9, "train-a-model"], [16, "train-a-model"], [19, "train-a-model"]], "Convert a model into ONNX": [[9, "convert-a-model-into-onnx"], [19, "convert-a-model-into-onnx"]], "Compute the prediction with ONNX Runtime": [[9, "compute-the-prediction-with-onnx-runtime"]], "Different ways to convert a model": [[10, "different-ways-to-convert-a-model"]], "Predict with onnxruntime": [[10, "predict-with-onnxruntime"]], "Simple KMeans": [[10, "simple-kmeans"]], "Pipeline and a custom object": [[10, "pipeline-and-a-custom-object"]], "Probabilities as a vector or as a ZipMap": [[11, "probabilities-as-a-vector-or-as-a-zipmap"]], "Without ZipMap": [[11, "without-zipmap"]], "One output per class": [[11, "one-output-per-class"]], "Let\u2019s compare prediction time": [[11, "let-s-compare-prediction-time"], [35, "let-s-compare-prediction-time"]], "Write your own converter for your own model": [[12, "write-your-own-converter-for-your-own-model"]], "Implementation of the new transform": [[12, "implementation-of-the-new-transform"]], "Experimentation on MNIST": [[12, "experimentation-on-mnist"]], "Repeatable t-SNE": [[12, "repeatable-t-sne"]], "ONNX - shape_calculator, converter": [[12, "onnx-shape-calculator-converter"]], "When a custom model is neither a classifier nor a regressor": [[13, "when-a-custom-model-is-neither-a-classifier-nor-a-regressor"]], "Iris and scoring": [[13, "iris-and-scoring"], [14, "iris-and-scoring"]], "Custom converter": [[13, "custom-converter"], [14, "custom-converter"], [56, "custom-converter"]], "Custom parser": [[13, "custom-parser"], [14, "custom-parser"]], "Final test": [[13, "final-test"], [14, "final-test"]], "When a custom model is neither a classifier nor a regressor (alternative)": [[14, "when-a-custom-model-is-neither-a-classifier-nor-a-regressor-alternative"]], "Discrepencies with GaussianProcessorRegressor: use of double": [[16, "discrepencies-with-gaussianprocessorregressor-use-of-double"]], "First attempt to convert a model into ONNX": [[16, "first-attempt-to-convert-a-model-into-onnx"]], "Second attempt: variable dimensions": [[16, 
"second-attempt-variable-dimensions"]], "Third attempt: use of double": [[16, "third-attempt-use-of-double"]], "Size increase": [[16, "size-increase"]], "return_std=True": [[16, "return-std-true"]], "Walk through intermediate outputs": [[17, "walk-through-intermediate-outputs"]], "Compute intermediate outputs": [[17, "compute-intermediate-outputs"]], "Display the sub-ONNX graph": [[17, "display-the-sub-onnx-graph"]], "Investigate a pipeline": [[18, "investigate-a-pipeline"]], "Pickle": [[18, "pickle"]], "Logging, verbose": [[19, "logging-verbose"]], "Conversion with parameter verbose": [[19, "conversion-with-parameter-verbose"]], "Conversion with logging": [[19, "conversion-with-logging"]], "Custom Operator for NMF Decomposition": [[21, "custom-operator-for-nmf-decomposition"]], "Building a simple model": [[21, "building-a-simple-model"]], "Conversion into ONNX": [[21, "conversion-into-onnx"], [46, "conversion-into-onnx"], [47, "conversion-into-onnx"], [48, "conversion-into-onnx"], [49, "conversion-into-onnx"]], "Play with ONNX operators": [[22, "play-with-onnx-operators"]], "ONNX Python API": [[22, "onnx-python-api"]], "Same example with sklearn-onnx": [[22, "same-example-with-sklearn-onnx"]], "Multiple operators": [[22, "multiple-operators"]], "Draw a pipeline": [[23, "draw-a-pipeline"]], "Retrieve a model in JSON format": [[23, "retrieve-a-model-in-json-format"]], "Draw a model with ONNX": [[23, "draw-a-model-with-onnx"]], "Convert a pipeline with a LightGbm model": [[24, "convert-a-pipeline-with-a-lightgbm-model"]], "Train a LightGBM classifier": [[24, "train-a-lightgbm-classifier"], [43, "train-a-lightgbm-classifier"]], "Register the converter for LGBMClassifier": [[24, "register-the-converter-for-lgbmclassifier"], [43, "register-the-converter-for-lgbmclassifier"], [44, "register-the-converter-for-lgbmclassifier"]], "Convert again": [[24, "convert-again"], [25, "convert-again"], [43, "convert-again"], [45, "convert-again"]], "Convert a pipeline with a XGBoost model": [[25, "convert-a-pipeline-with-a-xgboost-model"], [45, "convert-a-pipeline-with-a-xgboost-model"]], "Train a XGBoost classifier": [[25, "train-a-xgboost-classifier"], [45, "train-a-xgboost-classifier"]], "Register the converter for XGBClassifier": [[25, "register-the-converter-for-xgbclassifier"], [45, "register-the-converter-for-xgbclassifier"]], "TfIdfVectorizer with ONNX": [[26, "tfidfvectorizer-with-onnx"]], "Train a pipeline with TfidfVectorizer": [[26, "train-a-pipeline-with-tfidfvectorizer"]], "ONNX conversion": [[26, "onnx-conversion"]], "Computation times": [[27, "computation-times"], [58, "computation-times"]], "Examples": [[28, "examples"]], "Train and deploy a scikit-learn pipeline": [[29, "train-and-deploy-a-scikit-learn-pipeline"]], "Training a pipeline": [[29, "training-a-pipeline"], [30, "training-a-pipeline"]], "Converts the model": [[29, "converts-the-model"]], "Prediction with ONNX": [[29, "prediction-with-onnx"], [39, "prediction-with-onnx"]], "Comparison": [[29, "comparison"]], "Python runtime": [[29, "python-runtime"]], "Benchmark ONNX conversion": [[30, "benchmark-onnx-conversion"]], "Measure the processing time": [[30, "measure-the-processing-time"]], "ONNX runtime": [[30, "onnx-runtime"]], "Converter for WOEEncoder from categorical_encoder": [[31, "converter-for-woeencoder-from-categorical-encoder"]], "A simple example": [[31, "a-simple-example"], [57, "a-simple-example"]], "Custom converter for OrdinalEncoder": [[31, "custom-converter-for-ordinalencoder"]], "Custom converter for WOEEncoder": 
[[31, "custom-converter-for-woeencoder"]], "What is the opset number?": [[32, "what-is-the-opset-number"]], "Data": [[32, "data"]], "ONNX": [[32, "onnx"]], "ONNX and opset": [[32, "onnx-and-opset"]], "Other opsets": [[32, "other-opsets"]], "One model, many possible conversions with options": [[33, "one-model-many-possible-conversions-with-options"]], "Option zipmap": [[33, "option-zipmap"]], "Option in a pipeline": [[33, "option-in-a-pipeline"]], "Option raw_scores": [[33, "option-raw-scores"]], "Option decision_path": [[33, "option-decision-path"]], "List of available options": [[33, "list-of-available-options"]], "Black list operators when converting": [[34, "black-list-operators-when-converting"]], "Choose appropriate output of a classifier": [[35, "choose-appropriate-output-of-a-classifier"]], "Default behaviour: zipmap=True": [[35, "default-behaviour-zipmap-true"]], "Option zipmap=False": [[35, "option-zipmap-false"]], "Option zipmap=\u2019columns\u2019": [[35, "option-zipmap-columns"]], "Option zimpap=False and output_class_labels=True": [[35, "option-zimpap-false-and-output-class-labels-true"]], "MultiOutputClassifier": [[35, "multioutputclassifier"]], "Issues when switching to float": [[36, "issues-when-switching-to-float"]], "More into the issue": [[36, "more-into-the-issue"]], "The pipeline and the data": [[36, "the-pipeline-and-the-data"]], "The discrepencies": [[36, "the-discrepencies"]], "CastTransformer": [[36, "casttransformer"]], "Sledgehammer": [[36, "sledgehammer"]], "Intermediate results and investigation": [[37, "intermediate-results-and-investigation"]], "Look into pipeline steps": [[37, "look-into-pipeline-steps"]], "Python runtime to look into every node": [[37, "python-runtime-to-look-into-every-node"]], "Final graph": [[37, "final-graph"], [42, "final-graph"], [43, "final-graph"], [45, "final-graph"], [46, "final-graph"], [48, "final-graph"], [50, "final-graph"], [52, "final-graph"]], "Store arrays in one onnx graph": [[38, "store-arrays-in-one-onnx-graph"]], "Train and convert a model": [[38, "train-and-convert-a-model"]], "Add training parameter": [[38, "add-training-parameter"]], "Inference": [[38, "inference"]], "Select outputs": [[38, "select-outputs"]], "Save a model": [[38, "save-a-model"]], "Load a model": [[38, "load-a-model"]], "Dataframe as an input": [[39, "dataframe-as-an-input"]], "A dataset with categories": [[39, "a-dataset-with-categories"]], "Graph": [[39, "graph"]], "Unhide conversion logic with a dataframe": [[39, "unhide-conversion-logic-with-a-dataframe"]], "Transfer Learning with ONNX": [[40, "transfer-learning-with-onnx"]], "Retrieve and load a model": [[40, "retrieve-and-load-a-model"]], "Classifying an image": [[40, "classifying-an-image"]], "Classifying more images": [[40, "classifying-more-images"]], "Transfer learning in a pipeline": [[40, "transfer-learning-in-a-pipeline"]], "Graph for the PCA": [[40, "graph-for-the-pca"]], "Remove one layer at the end": [[40, "remove-one-layer-at-the-end"]], "Modify the ONNX graph": [[41, "modify-the-onnx-graph"]], "Basic example": [[41, "basic-example"]], "Changes the input names": [[41, "changes-the-input-names"]], "Changes the output names": [[41, "changes-the-output-names"]], "Renaming intermediate results": [[41, "renaming-intermediate-results"]], "Convert a pipeline with a CatBoost classifier": [[42, "convert-a-pipeline-with-a-catboost-classifier"]], "Train a CatBoostClassifier": [[42, "train-a-catboostclassifier"]], "Register the converter for CatBoostClassifier": [[42, 
"register-the-converter-for-catboostclassifier"]], "Convert": [[42, "convert"], [44, "convert"]], "Convert a pipeline with a LightGBM classifier": [[43, "convert-a-pipeline-with-a-lightgbm-classifier"]], "Convert a pipeline with a LightGBM regressor": [[44, "convert-a-pipeline-with-a-lightgbm-regressor"]], "Train a LGBMRegressor": [[44, "train-a-lgbmregressor"]], "Discrepancies": [[44, "discrepancies"]], "Split influence": [[44, "split-influence"]], "Same example with XGBRegressor": [[45, "same-example-with-xgbregressor"]], "Same with a Booster": [[45, "same-with-a-booster"]], "Implement a new converter": [[46, "implement-a-new-converter"]], "Custom model": [[46, "custom-model"], [47, "custom-model"], [48, "custom-model"], [49, "custom-model"]], "Two ways to implement a converter": [[47, "two-ways-to-implement-a-converter"]], "Implement a new converter using other converters": [[48, "implement-a-new-converter-using-other-converters"]], "A new converter with options": [[49, "a-new-converter-with-options"]], "Time comparison": [[49, "time-comparison"]], "Change the number of outputs by adding a parser": [[50, "change-the-number-of-outputs-by-adding-a-parser"]], "A new transformer": [[50, "a-new-transformer"]], "Conversion into ONNX with two outputs": [[50, "conversion-into-onnx-with-two-outputs"]], "Tricky issue when converting CountVectorizer or TfidfVectorizer": [[51, "tricky-issue-when-converting-countvectorizer-or-tfidfvectorizer"]], "A simple example impossible to convert": [[51, "a-simple-example-impossible-to-convert"]], "TraceableTfidfVectorizer": [[51, "traceabletfidfvectorizer"]], "Fast design with a python runtime": [[52, "fast-design-with-a-python-runtime"]], "A transformer which decorrelates variables": [[52, "a-transformer-which-decorrelates-variables"]], "Extend ONNX": [[52, "extend-onnx"]], "New ONNX operator": [[52, "new-onnx-operator"]], "shape calculator": [[52, "shape-calculator"]], "converter": [[52, "converter"]], "Runtime for Eig": [[52, "runtime-for-eig"]], "Registration": [[52, "registration"]], "Final example": [[52, "final-example"]], "Dealing with discrepancies (tf-idf)": [[54, "dealing-with-discrepancies-tf-idf"]], "Imports, setups": [[54, "imports-setups"], [55, "imports-setups"]], "Artificial datasets": [[54, "artificial-datasets"], [55, "artificial-datasets"]], "Fit a TfIdfVectorizer": [[54, "fit-a-tfidfvectorizer"]], "Execution with ONNX": [[54, "execution-with-onnx"]], "TfIdf and sparse matrices": [[55, "tfidf-and-sparse-matrices"]], "Train ensemble after sparse": [[55, "train-ensemble-after-sparse"]], "Dense data": [[55, "dense-data"]], "Dense data with nan": [[55, "dense-data-with-nan"]], "Dense, 0 replaced by nan": [[55, "dense-0-replaced-by-nan"]], "Conclusion": [[55, "conclusion"]], "Converter for pyod.models.iforest.IForest": [[56, "converter-for-pyod-models-iforest-iforest"]], "Trains a model": [[56, "trains-a-model"]], "Checking discrepencies": [[56, "checking-discrepencies"]], "Converter for WOE": [[57, "converter-for-woe"]], "One Hot": [[57, "one-hot"]], "ONNX Graphs": [[57, "onnx-graphs"]], "Half-line": [[57, "half-line"]], "Introduction": [[61, "introduction"]], "Quick start": [[61, "quick-start"]], "Step 1: Train a model using your favorite framework": [[61, "step-1-train-a-model-using-your-favorite-framework"]], "Step 2: Convert or export the model into ONNX format": [[61, "step-2-convert-or-export-the-model-into-onnx-format"]], "Step 3: Load and run the model using ONNX Runtime": [[61, "step-3-load-and-run-the-model-using-onnx-runtime"]], 
"convert_sklearn, to_onnx, initial_types": [[61, "convert-sklearn-to-onnx-initial-types"]], "Converters with options": [[62, "converters-with-options"]], "GaussianProcessRegressor, NearestNeighbors": [[62, "gaussianprocessregressor-nearestneighbors"]], "TfidfVectorizer, CountVectorizer": [[62, "tfidfvectorizer-countvectorizer"]], "Additional options": [[62, "additional-options"]], "Classifiers": [[62, "classifiers"]], "ZipMap": [[62, "zipmap"]], "Class information": [[62, "class-information"]], "Raw scores": [[62, "raw-scores"]], "Pickability and Pipeline": [[62, "pickability-and-pipeline"]], "Convert a pipeline": [[63, "convert-a-pipeline"]], "Convert complex pipelines": [[63, "convert-complex-pipelines"]], "Parser, shape calculator, converter": [[63, "parser-shape-calculator-converter"]], "New converters in a pipeline": [[63, "new-converters-in-a-pipeline"]], "Titanic example": [[63, "titanic-example"]], "Parameterize the conversion": [[63, "parameterize-the-conversion"]], "Investigate discrepencies": [[63, "investigate-discrepencies"]], "Investigate missing converters": [[63, "investigate-missing-converters"]], "Supported scikit-learn Models": [[64, "supported-scikit-learn-models"]], "Covered Converters": [[64, "covered-converters"]], "Converters Documentation": [[64, "converters-documentation"]], "OnnxBooster": [[64, "onnxbooster"]], "OnnxCastRegressor": [[64, "onnxcastregressor"]], "OnnxCastTransformer": [[64, "onnxcasttransformer"]], "OnnxCatBoostClassifier": [[64, "onnxcatboostclassifier"]], "OnnxCustomScorerTransform": [[64, "onnxcustomscorertransform"]], "OnnxDecorrelateTransformer": [[64, "onnxdecorrelatetransformer"]], "OnnxIForest": [[64, "onnxiforest"]], "OnnxLiveDecorrelateTransformer": [[64, "onnxlivedecorrelatetransformer"]], "OnnxMockWrappedLightGbmBoosterClassifier": [[64, "onnxmockwrappedlightgbmboosterclassifier"]], "OnnxOrdinalEncoder": [[64, "onnxordinalencoder"]], "OnnxPredictableTSNE": [[64, "onnxpredictabletsne"]], "OnnxReplaceTransformer": [[64, "onnxreplacetransformer"]], "OnnxSklearnARDRegression": [[64, "onnxsklearnardregression"]], "OnnxSklearnAdaBoostClassifier": [[64, "onnxsklearnadaboostclassifier"]], "OnnxSklearnAdaBoostRegressor": [[64, "onnxsklearnadaboostregressor"]], "OnnxSklearnBaggingClassifier": [[64, "onnxsklearnbaggingclassifier"]], "OnnxSklearnBaggingRegressor": [[64, "onnxsklearnbaggingregressor"]], "OnnxSklearnBayesianGaussianMixture": [[64, "onnxsklearnbayesiangaussianmixture"]], "OnnxSklearnBayesianRidge": [[64, "onnxsklearnbayesianridge"]], "OnnxSklearnBernoulliNB": [[64, "onnxsklearnbernoullinb"]], "OnnxSklearnBinarizer": [[64, "onnxsklearnbinarizer"]], "OnnxSklearnCalibratedClassifierCV": [[64, "onnxsklearncalibratedclassifiercv"]], "OnnxSklearnCategoricalNB": [[64, "onnxsklearncategoricalnb"]], "OnnxSklearnColumnTransformer": [[64, "onnxsklearncolumntransformer"]], "OnnxSklearnComplementNB": [[64, "onnxsklearncomplementnb"]], "OnnxSklearnCountVectorizer": [[64, "onnxsklearncountvectorizer"]], "OnnxSklearnDecisionTreeClassifier": [[64, "onnxsklearndecisiontreeclassifier"]], "OnnxSklearnDecisionTreeRegressor": [[64, "onnxsklearndecisiontreeregressor"]], "OnnxSklearnDictVectorizer": [[64, "onnxsklearndictvectorizer"]], "OnnxSklearnElasticNet": [[64, "onnxsklearnelasticnet"]], "OnnxSklearnElasticNetCV": [[64, "onnxsklearnelasticnetcv"]], "OnnxSklearnExtraTreeClassifier": [[64, "onnxsklearnextratreeclassifier"]], "OnnxSklearnExtraTreeRegressor": [[64, "onnxsklearnextratreeregressor"]], "OnnxSklearnExtraTreesClassifier": [[64, 
"onnxsklearnextratreesclassifier"]], "OnnxSklearnExtraTreesRegressor": [[64, "onnxsklearnextratreesregressor"]], "OnnxSklearnFeatureHasher": [[64, "onnxsklearnfeaturehasher"]], "OnnxSklearnFeatureUnion": [[64, "onnxsklearnfeatureunion"]], "OnnxSklearnFunctionTransformer": [[64, "onnxsklearnfunctiontransformer"]], "OnnxSklearnGammaRegressor": [[64, "onnxsklearngammaregressor"]], "OnnxSklearnGaussianMixture": [[64, "onnxsklearngaussianmixture"]], "OnnxSklearnGaussianNB": [[64, "onnxsklearngaussiannb"]], "OnnxSklearnGaussianProcessClassifier": [[64, "onnxsklearngaussianprocessclassifier"]], "OnnxSklearnGaussianProcessRegressor": [[64, "onnxsklearngaussianprocessregressor"]], "OnnxSklearnGaussianRandomProjection": [[64, "onnxsklearngaussianrandomprojection"]], "OnnxSklearnGenericUnivariateSelect": [[64, "onnxsklearngenericunivariateselect"]], "OnnxSklearnGradientBoostingClassifier": [[64, "onnxsklearngradientboostingclassifier"]], "OnnxSklearnGradientBoostingRegressor": [[64, "onnxsklearngradientboostingregressor"]], "OnnxSklearnGridSearchCV": [[64, "onnxsklearngridsearchcv"]], "OnnxSklearnHistGradientBoostingClassifier": [[64, "onnxsklearnhistgradientboostingclassifier"]], "OnnxSklearnHistGradientBoostingRegressor": [[64, "onnxsklearnhistgradientboostingregressor"]], "OnnxSklearnHuberRegressor": [[64, "onnxsklearnhuberregressor"]], "OnnxSklearnIncrementalPCA": [[64, "onnxsklearnincrementalpca"]], "OnnxSklearnIsolationForest": [[64, "onnxsklearnisolationforest"]], "OnnxSklearnKBinsDiscretizer": [[64, "onnxsklearnkbinsdiscretizer"]], "OnnxSklearnKMeans": [[64, "onnxsklearnkmeans"]], "OnnxSklearnKNNImputer": [[64, "onnxsklearnknnimputer"]], "OnnxSklearnKNeighborsClassifier": [[64, "onnxsklearnkneighborsclassifier"]], "OnnxSklearnKNeighborsRegressor": [[64, "onnxsklearnkneighborsregressor"]], "OnnxSklearnKNeighborsTransformer": [[64, "onnxsklearnkneighborstransformer"]], "OnnxSklearnKernelCenterer": [[64, "onnxsklearnkernelcenterer"]], "OnnxSklearnKernelPCA": [[64, "onnxsklearnkernelpca"]], "OnnxSklearnLGBMClassifier": [[64, "onnxsklearnlgbmclassifier"]], "OnnxSklearnLGBMRegressor": [[64, "onnxsklearnlgbmregressor"]], "OnnxSklearnLabelBinarizer": [[64, "onnxsklearnlabelbinarizer"]], "OnnxSklearnLabelEncoder": [[64, "onnxsklearnlabelencoder"]], "OnnxSklearnLars": [[64, "onnxsklearnlars"]], "OnnxSklearnLarsCV": [[64, "onnxsklearnlarscv"]], "OnnxSklearnLasso": [[64, "onnxsklearnlasso"]], "OnnxSklearnLassoCV": [[64, "onnxsklearnlassocv"]], "OnnxSklearnLassoLars": [[64, "onnxsklearnlassolars"]], "OnnxSklearnLassoLarsCV": [[64, "onnxsklearnlassolarscv"]], "OnnxSklearnLassoLarsIC": [[64, "onnxsklearnlassolarsic"]], "OnnxSklearnLinearDiscriminantAnalysis": [[64, "onnxsklearnlineardiscriminantanalysis"]], "OnnxSklearnLinearRegression": [[64, "onnxsklearnlinearregression"]], "OnnxSklearnLinearSVC": [[64, "onnxsklearnlinearsvc"]], "OnnxSklearnLinearSVR": [[64, "onnxsklearnlinearsvr"]], "OnnxSklearnLocalOutlierFactor": [[64, "onnxsklearnlocaloutlierfactor"]], "OnnxSklearnLogisticRegression": [[64, "onnxsklearnlogisticregression"]], "OnnxSklearnLogisticRegressionCV": [[64, "onnxsklearnlogisticregressioncv"]], "OnnxSklearnMLPClassifier": [[64, "onnxsklearnmlpclassifier"]], "OnnxSklearnMLPRegressor": [[64, "onnxsklearnmlpregressor"]], "OnnxSklearnMaxAbsScaler": [[64, "onnxsklearnmaxabsscaler"]], "OnnxSklearnMinMaxScaler": [[64, "onnxsklearnminmaxscaler"]], "OnnxSklearnMiniBatchKMeans": [[64, "onnxsklearnminibatchkmeans"]], "OnnxSklearnMultiOutputClassifier": [[64, "onnxsklearnmultioutputclassifier"]], 
"OnnxSklearnMultiOutputRegressor": [[64, "onnxsklearnmultioutputregressor"]], "OnnxSklearnMultiTaskElasticNet": [[64, "onnxsklearnmultitaskelasticnet"]], "OnnxSklearnMultiTaskElasticNetCV": [[64, "onnxsklearnmultitaskelasticnetcv"]], "OnnxSklearnMultiTaskLasso": [[64, "onnxsklearnmultitasklasso"]], "OnnxSklearnMultiTaskLassoCV": [[64, "onnxsklearnmultitasklassocv"]], "OnnxSklearnMultinomialNB": [[64, "onnxsklearnmultinomialnb"]], "OnnxSklearnNearestNeighbors": [[64, "onnxsklearnnearestneighbors"]], "OnnxSklearnNeighborhoodComponentsAnalysis": [[64, "onnxsklearnneighborhoodcomponentsanalysis"]], "OnnxSklearnNormalizer": [[64, "onnxsklearnnormalizer"]], "OnnxSklearnNuSVC": [[64, "onnxsklearnnusvc"]], "OnnxSklearnNuSVR": [[64, "onnxsklearnnusvr"]], "OnnxSklearnOneClassSVM": [[64, "onnxsklearnoneclasssvm"]], "OnnxSklearnOneHotEncoder": [[64, "onnxsklearnonehotencoder"]], "OnnxSklearnOneVsOneClassifier": [[64, "onnxsklearnonevsoneclassifier"]], "OnnxSklearnOneVsRestClassifier": [[64, "onnxsklearnonevsrestclassifier"]], "OnnxSklearnOrdinalEncoder": [[64, "onnxsklearnordinalencoder"]], "OnnxSklearnOrthogonalMatchingPursuit": [[64, "onnxsklearnorthogonalmatchingpursuit"]], "OnnxSklearnOrthogonalMatchingPursuitCV": [[64, "onnxsklearnorthogonalmatchingpursuitcv"]], "OnnxSklearnPCA": [[64, "onnxsklearnpca"]], "OnnxSklearnPLSRegression": [[64, "onnxsklearnplsregression"]], "OnnxSklearnPassiveAggressiveClassifier": [[64, "onnxsklearnpassiveaggressiveclassifier"]], "OnnxSklearnPassiveAggressiveRegressor": [[64, "onnxsklearnpassiveaggressiveregressor"]], "OnnxSklearnPerceptron": [[64, "onnxsklearnperceptron"]], "OnnxSklearnPipeline": [[64, "onnxsklearnpipeline"]], "OnnxSklearnPoissonRegressor": [[64, "onnxsklearnpoissonregressor"]], "OnnxSklearnPolynomialFeatures": [[64, "onnxsklearnpolynomialfeatures"]], "OnnxSklearnPowerTransformer": [[64, "onnxsklearnpowertransformer"]], "OnnxSklearnQuadraticDiscriminantAnalysis": [[64, "onnxsklearnquadraticdiscriminantanalysis"]], "OnnxSklearnQuantileRegressor": [[64, "onnxsklearnquantileregressor"]], "OnnxSklearnRANSACRegressor": [[64, "onnxsklearnransacregressor"]], "OnnxSklearnRFE": [[64, "onnxsklearnrfe"]], "OnnxSklearnRFECV": [[64, "onnxsklearnrfecv"]], "OnnxSklearnRadiusNeighborsClassifier": [[64, "onnxsklearnradiusneighborsclassifier"]], "OnnxSklearnRadiusNeighborsRegressor": [[64, "onnxsklearnradiusneighborsregressor"]], "OnnxSklearnRandomForestClassifier": [[64, "onnxsklearnrandomforestclassifier"]], "OnnxSklearnRandomForestRegressor": [[64, "onnxsklearnrandomforestregressor"]], "OnnxSklearnRandomTreesEmbedding": [[64, "onnxsklearnrandomtreesembedding"]], "OnnxSklearnRidge": [[64, "onnxsklearnridge"]], "OnnxSklearnRidgeCV": [[64, "onnxsklearnridgecv"]], "OnnxSklearnRidgeClassifier": [[64, "onnxsklearnridgeclassifier"]], "OnnxSklearnRidgeClassifierCV": [[64, "onnxsklearnridgeclassifiercv"]], "OnnxSklearnRobustScaler": [[64, "onnxsklearnrobustscaler"]], "OnnxSklearnSGDClassifier": [[64, "onnxsklearnsgdclassifier"]], "OnnxSklearnSGDOneClassSVM": [[64, "onnxsklearnsgdoneclasssvm"]], "OnnxSklearnSGDRegressor": [[64, "onnxsklearnsgdregressor"]], "OnnxSklearnSVC": [[64, "onnxsklearnsvc"]], "OnnxSklearnSVR": [[64, "onnxsklearnsvr"]], "OnnxSklearnSelectFdr": [[64, "onnxsklearnselectfdr"]], "OnnxSklearnSelectFpr": [[64, "onnxsklearnselectfpr"]], "OnnxSklearnSelectFromModel": [[64, "onnxsklearnselectfrommodel"]], "OnnxSklearnSelectFwe": [[64, "onnxsklearnselectfwe"]], "OnnxSklearnSelectKBest": [[64, "onnxsklearnselectkbest"]], "OnnxSklearnSelectPercentile": [[64, 
"onnxsklearnselectpercentile"]], "OnnxSklearnSimpleImputer": [[64, "onnxsklearnsimpleimputer"]], "OnnxSklearnStackingClassifier": [[64, "onnxsklearnstackingclassifier"]], "OnnxSklearnStackingRegressor": [[64, "onnxsklearnstackingregressor"]], "OnnxSklearnStandardScaler": [[64, "onnxsklearnstandardscaler"]], "OnnxSklearnTfidfTransformer": [[64, "onnxsklearntfidftransformer"]], "OnnxSklearnTfidfVectorizer": [[64, "onnxsklearntfidfvectorizer"]], "OnnxSklearnTheilSenRegressor": [[64, "onnxsklearntheilsenregressor"]], "OnnxSklearnTraceableCountVectorizer": [[64, "onnxsklearntraceablecountvectorizer"]], "OnnxSklearnTraceableTfidfVectorizer": [[64, "onnxsklearntraceabletfidfvectorizer"]], "OnnxSklearnTransformedTargetRegressor": [[64, "onnxsklearntransformedtargetregressor"]], "OnnxSklearnTruncatedSVD": [[64, "onnxsklearntruncatedsvd"]], "OnnxSklearnTweedieRegressor": [[64, "onnxsklearntweedieregressor"]], "OnnxSklearnVarianceThreshold": [[64, "onnxsklearnvariancethreshold"]], "OnnxSklearnVotingClassifier": [[64, "onnxsklearnvotingclassifier"]], "OnnxSklearnVotingRegressor": [[64, "onnxsklearnvotingregressor"]], "OnnxSklearnXGBClassifier": [[64, "onnxsklearnxgbclassifier"]], "OnnxSklearnXGBRegressor": [[64, "onnxsklearnxgbregressor"]], "OnnxTransferTransformer": [[64, "onnxtransfertransformer"]], "OnnxValidatorClassifier": [[64, "onnxvalidatorclassifier"]], "OnnxWOEEncoder": [[64, "onnxwoeencoder"]], "OnnxWOETransformer": [[64, "onnxwoetransformer"]], "OnnxWrappedLightGbmBooster": [[64, "onnxwrappedlightgbmbooster"]], "OnnxWrappedLightGbmBoosterClassifier": [[64, "onnxwrappedlightgbmboosterclassifier"]], "Pipeline": [[64, "pipeline"]], "Available ONNX operators": [[64, "available-onnx-operators"]], "OnnxAbs": [[64, "onnxabs"]], "OnnxAbs_1": [[64, "onnxabs-1"]], "OnnxAbs_13": [[64, "onnxabs-13"]], "OnnxAbs_6": [[64, "onnxabs-6"]], "OnnxAcos": [[64, "onnxacos"]], "OnnxAcos_7": [[64, "onnxacos-7"]], "OnnxAcosh": [[64, "onnxacosh"]], "OnnxAcosh_9": [[64, "onnxacosh-9"]], "OnnxAdagrad": [[64, "onnxadagrad"]], "OnnxAdagrad_1": [[64, "onnxadagrad-1"]], "OnnxAdam": [[64, "onnxadam"]], "OnnxAdam_1": [[64, "onnxadam-1"]], "OnnxAdd": [[64, "onnxadd"]], "OnnxAdd_1": [[64, "onnxadd-1"]], "OnnxAdd_13": [[64, "onnxadd-13"]], "OnnxAdd_14": [[64, "onnxadd-14"]], "OnnxAdd_6": [[64, "onnxadd-6"]], "OnnxAdd_7": [[64, "onnxadd-7"]], "OnnxAnd": [[64, "onnxand"]], "OnnxAnd_1": [[64, "onnxand-1"]], "OnnxAnd_7": [[64, "onnxand-7"]], "OnnxArgMax": [[64, "onnxargmax"]], "OnnxArgMax_1": [[64, "onnxargmax-1"]], "OnnxArgMax_11": [[64, "onnxargmax-11"]], "OnnxArgMax_12": [[64, "onnxargmax-12"]], "OnnxArgMax_13": [[64, "onnxargmax-13"]], "OnnxArgMin": [[64, "onnxargmin"]], "OnnxArgMin_1": [[64, "onnxargmin-1"]], "OnnxArgMin_11": [[64, "onnxargmin-11"]], "OnnxArgMin_12": [[64, "onnxargmin-12"]], "OnnxArgMin_13": [[64, "onnxargmin-13"]], "OnnxArrayFeatureExtractor": [[64, "onnxarrayfeatureextractor"]], "OnnxArrayFeatureExtractor_1": [[64, "onnxarrayfeatureextractor-1"]], "OnnxAsin": [[64, "onnxasin"]], "OnnxAsin_7": [[64, "onnxasin-7"]], "OnnxAsinh": [[64, "onnxasinh"]], "OnnxAsinh_9": [[64, "onnxasinh-9"]], "OnnxAtan": [[64, "onnxatan"]], "OnnxAtan_7": [[64, "onnxatan-7"]], "OnnxAtanh": [[64, "onnxatanh"]], "OnnxAtanh_9": [[64, "onnxatanh-9"]], "OnnxAveragePool": [[64, "onnxaveragepool"]], "or": [[64, "or"], [64, "id37"], [64, "id39"], [64, "id41"], [64, "id328"], [64, "id332"], [64, "id339"], [64, "id342"], [64, "id344"], [64, "id346"], [64, "id574"], [64, "id577"]], "OnnxAveragePool_1": [[64, "onnxaveragepool-1"]], 
"OnnxAveragePool_10": [[64, "onnxaveragepool-10"]], "OnnxAveragePool_11": [[64, "onnxaveragepool-11"]], "OnnxAveragePool_19": [[64, "onnxaveragepool-19"]], "OnnxAveragePool_7": [[64, "onnxaveragepool-7"]], "OnnxBatchNormalization": [[64, "onnxbatchnormalization"]], "OnnxBatchNormalization_1": [[64, "onnxbatchnormalization-1"]], "OnnxBatchNormalization_14": [[64, "onnxbatchnormalization-14"]], "OnnxBatchNormalization_15": [[64, "onnxbatchnormalization-15"]], "OnnxBatchNormalization_6": [[64, "onnxbatchnormalization-6"]], "OnnxBatchNormalization_7": [[64, "onnxbatchnormalization-7"]], "OnnxBatchNormalization_9": [[64, "onnxbatchnormalization-9"]], "OnnxBernoulli": [[64, "onnxbernoulli"]], "OnnxBernoulli_15": [[64, "onnxbernoulli-15"]], "OnnxBinarizer": [[64, "onnxbinarizer"]], "OnnxBinarizer_1": [[64, "onnxbinarizer-1"]], "OnnxBitShift": [[64, "onnxbitshift"]], "OnnxBitShift_11": [[64, "onnxbitshift-11"]], "OnnxBitwiseAnd": [[64, "onnxbitwiseand"]], "OnnxBitwiseAnd_18": [[64, "onnxbitwiseand-18"]], "OnnxBitwiseNot": [[64, "onnxbitwisenot"]], "OnnxBitwiseNot_18": [[64, "onnxbitwisenot-18"]], "OnnxBitwiseOr": [[64, "onnxbitwiseor"]], "OnnxBitwiseOr_18": [[64, "onnxbitwiseor-18"]], "OnnxBitwiseXor": [[64, "onnxbitwisexor"]], "OnnxBitwiseXor_18": [[64, "onnxbitwisexor-18"]], "OnnxBlackmanWindow": [[64, "onnxblackmanwindow"]], "OnnxBlackmanWindow_17": [[64, "onnxblackmanwindow-17"]], "OnnxCast": [[64, "onnxcast"]], "OnnxCastLike": [[64, "onnxcastlike"]], "OnnxCastLike_15": [[64, "onnxcastlike-15"]], "OnnxCastMap": [[64, "onnxcastmap"]], "OnnxCastMap_1": [[64, "onnxcastmap-1"]], "OnnxCast_1": [[64, "onnxcast-1"]], "OnnxCast_13": [[64, "onnxcast-13"]], "OnnxCast_6": [[64, "onnxcast-6"]], "OnnxCast_9": [[64, "onnxcast-9"]], "OnnxCategoryMapper": [[64, "onnxcategorymapper"]], "OnnxCategoryMapper_1": [[64, "onnxcategorymapper-1"]], "OnnxCeil": [[64, "onnxceil"]], "OnnxCeil_1": [[64, "onnxceil-1"]], "OnnxCeil_13": [[64, "onnxceil-13"]], "OnnxCeil_6": [[64, "onnxceil-6"]], "OnnxCelu": [[64, "onnxcelu"]], "OnnxCelu_12": [[64, "onnxcelu-12"]], "OnnxCenterCropPad": [[64, "onnxcentercroppad"]], "OnnxCenterCropPad_18": [[64, "onnxcentercroppad-18"]], "OnnxClip": [[64, "onnxclip"]], "OnnxClip_1": [[64, "onnxclip-1"]], "OnnxClip_11": [[64, "onnxclip-11"]], "OnnxClip_12": [[64, "onnxclip-12"]], "OnnxClip_13": [[64, "onnxclip-13"]], "OnnxClip_6": [[64, "onnxclip-6"]], "OnnxCol2Im": [[64, "onnxcol2im"]], "OnnxCol2Im_18": [[64, "onnxcol2im-18"]], "OnnxCompress": [[64, "onnxcompress"]], "OnnxCompress_11": [[64, "onnxcompress-11"]], "OnnxCompress_9": [[64, "onnxcompress-9"]], "OnnxConcat": [[64, "onnxconcat"]], "OnnxConcatFromSequence": [[64, "onnxconcatfromsequence"]], "OnnxConcatFromSequence_11": [[64, "onnxconcatfromsequence-11"]], "OnnxConcat_1": [[64, "onnxconcat-1"]], "OnnxConcat_11": [[64, "onnxconcat-11"]], "OnnxConcat_13": [[64, "onnxconcat-13"]], "OnnxConcat_4": [[64, "onnxconcat-4"]], "OnnxConstant": [[64, "onnxconstant"]], "OnnxConstantOfShape": [[64, "onnxconstantofshape"]], "OnnxConstantOfShape_9": [[64, "onnxconstantofshape-9"]], "OnnxConstant_1": [[64, "onnxconstant-1"]], "OnnxConstant_11": [[64, "onnxconstant-11"]], "OnnxConstant_12": [[64, "onnxconstant-12"]], "OnnxConstant_13": [[64, "onnxconstant-13"]], "OnnxConstant_9": [[64, "onnxconstant-9"]], "OnnxConv": [[64, "onnxconv"]], "OnnxConvInteger": [[64, "onnxconvinteger"]], "OnnxConvInteger_10": [[64, "onnxconvinteger-10"]], "OnnxConvTranspose": [[64, "onnxconvtranspose"]], "OnnxConvTranspose_1": [[64, "onnxconvtranspose-1"]], 
"OnnxConvTranspose_11": [[64, "onnxconvtranspose-11"]], "OnnxConv_1": [[64, "onnxconv-1"]], "OnnxConv_11": [[64, "onnxconv-11"]], "OnnxCos": [[64, "onnxcos"]], "OnnxCos_7": [[64, "onnxcos-7"]], "OnnxCosh": [[64, "onnxcosh"]], "OnnxCosh_9": [[64, "onnxcosh-9"]], "OnnxCumSum": [[64, "onnxcumsum"]], "OnnxCumSum_11": [[64, "onnxcumsum-11"]], "OnnxCumSum_14": [[64, "onnxcumsum-14"]], "OnnxDFT": [[64, "onnxdft"]], "OnnxDFT_17": [[64, "onnxdft-17"]], "OnnxDepthToSpace": [[64, "onnxdepthtospace"]], "OnnxDepthToSpace_1": [[64, "onnxdepthtospace-1"]], "OnnxDepthToSpace_11": [[64, "onnxdepthtospace-11"]], "OnnxDepthToSpace_13": [[64, "onnxdepthtospace-13"]], "OnnxDequantizeLinear": [[64, "onnxdequantizelinear"]], "OnnxDequantizeLinear_10": [[64, "onnxdequantizelinear-10"]], "OnnxDequantizeLinear_13": [[64, "onnxdequantizelinear-13"]], "OnnxDet": [[64, "onnxdet"]], "OnnxDet_11": [[64, "onnxdet-11"]], "OnnxDictVectorizer": [[64, "onnxdictvectorizer"]], "OnnxDictVectorizer_1": [[64, "onnxdictvectorizer-1"]], "OnnxDiv": [[64, "onnxdiv"]], "OnnxDiv_1": [[64, "onnxdiv-1"]], "OnnxDiv_13": [[64, "onnxdiv-13"]], "OnnxDiv_14": [[64, "onnxdiv-14"]], "OnnxDiv_6": [[64, "onnxdiv-6"]], "OnnxDiv_7": [[64, "onnxdiv-7"]], "OnnxDropout": [[64, "onnxdropout"]], "OnnxDropout_1": [[64, "onnxdropout-1"]], "OnnxDropout_10": [[64, "onnxdropout-10"]], "OnnxDropout_12": [[64, "onnxdropout-12"]], "OnnxDropout_13": [[64, "onnxdropout-13"]], "OnnxDropout_6": [[64, "onnxdropout-6"]], "OnnxDropout_7": [[64, "onnxdropout-7"]], "OnnxDynamicQuantizeLinear": [[64, "onnxdynamicquantizelinear"]], "OnnxDynamicQuantizeLinear_11": [[64, "onnxdynamicquantizelinear-11"]], "OnnxEinsum": [[64, "onnxeinsum"]], "OnnxEinsum_12": [[64, "onnxeinsum-12"]], "OnnxElu": [[64, "onnxelu"]], "OnnxElu_1": [[64, "onnxelu-1"]], "OnnxElu_6": [[64, "onnxelu-6"]], "OnnxEqual": [[64, "onnxequal"]], "OnnxEqual_1": [[64, "onnxequal-1"]], "OnnxEqual_11": [[64, "onnxequal-11"]], "OnnxEqual_13": [[64, "onnxequal-13"]], "OnnxEqual_19": [[64, "onnxequal-19"]], "OnnxEqual_7": [[64, "onnxequal-7"]], "OnnxErf": [[64, "onnxerf"]], "OnnxErf_13": [[64, "onnxerf-13"]], "OnnxErf_9": [[64, "onnxerf-9"]], "OnnxExp": [[64, "onnxexp"]], "OnnxExp_1": [[64, "onnxexp-1"]], "OnnxExp_13": [[64, "onnxexp-13"]], "OnnxExp_6": [[64, "onnxexp-6"]], "OnnxExpand": [[64, "onnxexpand"]], "OnnxExpand_13": [[64, "onnxexpand-13"]], "OnnxExpand_8": [[64, "onnxexpand-8"]], "OnnxEyeLike": [[64, "onnxeyelike"]], "OnnxEyeLike_9": [[64, "onnxeyelike-9"]], "OnnxFeatureVectorizer": [[64, "onnxfeaturevectorizer"]], "OnnxFeatureVectorizer_1": [[64, "onnxfeaturevectorizer-1"]], "OnnxFlatten": [[64, "onnxflatten"]], "OnnxFlatten_1": [[64, "onnxflatten-1"]], "OnnxFlatten_11": [[64, "onnxflatten-11"]], "OnnxFlatten_13": [[64, "onnxflatten-13"]], "OnnxFlatten_9": [[64, "onnxflatten-9"]], "OnnxFloor": [[64, "onnxfloor"]], "OnnxFloor_1": [[64, "onnxfloor-1"]], "OnnxFloor_13": [[64, "onnxfloor-13"]], "OnnxFloor_6": [[64, "onnxfloor-6"]], "OnnxGRU": [[64, "onnxgru"]], "OnnxGRU_1": [[64, "onnxgru-1"]], "OnnxGRU_14": [[64, "onnxgru-14"]], "OnnxGRU_3": [[64, "onnxgru-3"]], "OnnxGRU_7": [[64, "onnxgru-7"]], "OnnxGather": [[64, "onnxgather"]], "OnnxGatherElements": [[64, "onnxgatherelements"]], "OnnxGatherElements_11": [[64, "onnxgatherelements-11"]], "OnnxGatherElements_13": [[64, "onnxgatherelements-13"]], "OnnxGatherND": [[64, "onnxgathernd"]], "OnnxGatherND_11": [[64, "onnxgathernd-11"]], "OnnxGatherND_12": [[64, "onnxgathernd-12"]], "OnnxGatherND_13": [[64, "onnxgathernd-13"]], "OnnxGather_1": [[64, 
"onnxgather-1"]], "OnnxGather_11": [[64, "onnxgather-11"]], "OnnxGather_13": [[64, "onnxgather-13"]], "OnnxGemm": [[64, "onnxgemm"]], "OnnxGemm_1": [[64, "onnxgemm-1"]], "OnnxGemm_11": [[64, "onnxgemm-11"]], "OnnxGemm_13": [[64, "onnxgemm-13"]], "OnnxGemm_6": [[64, "onnxgemm-6"]], "OnnxGemm_7": [[64, "onnxgemm-7"]], "OnnxGemm_9": [[64, "onnxgemm-9"]], "OnnxGlobalAveragePool": [[64, "onnxglobalaveragepool"]], "OnnxGlobalAveragePool_1": [[64, "onnxglobalaveragepool-1"]], "OnnxGlobalLpPool": [[64, "onnxgloballppool"]], "OnnxGlobalLpPool_1": [[64, "onnxgloballppool-1"]], "OnnxGlobalLpPool_2": [[64, "onnxgloballppool-2"]], "OnnxGlobalMaxPool": [[64, "onnxglobalmaxpool"]], "OnnxGlobalMaxPool_1": [[64, "onnxglobalmaxpool-1"]], "OnnxGradient": [[64, "onnxgradient"]], "OnnxGradient_1": [[64, "onnxgradient-1"]], "OnnxGreater": [[64, "onnxgreater"]], "OnnxGreaterOrEqual": [[64, "onnxgreaterorequal"]], "OnnxGreaterOrEqual_12": [[64, "onnxgreaterorequal-12"]], "OnnxGreaterOrEqual_16": [[64, "onnxgreaterorequal-16"]], "OnnxGreater_1": [[64, "onnxgreater-1"]], "OnnxGreater_13": [[64, "onnxgreater-13"]], "OnnxGreater_7": [[64, "onnxgreater-7"]], "OnnxGreater_9": [[64, "onnxgreater-9"]], "OnnxGridSample": [[64, "onnxgridsample"]], "OnnxGridSample_16": [[64, "onnxgridsample-16"]], "OnnxGroupNormalization": [[64, "onnxgroupnormalization"]], "OnnxGroupNormalization_18": [[64, "onnxgroupnormalization-18"]], "OnnxHammingWindow": [[64, "onnxhammingwindow"]], "OnnxHammingWindow_17": [[64, "onnxhammingwindow-17"]], "OnnxHannWindow": [[64, "onnxhannwindow"]], "OnnxHannWindow_17": [[64, "onnxhannwindow-17"]], "OnnxHardSigmoid": [[64, "onnxhardsigmoid"]], "OnnxHardSigmoid_1": [[64, "onnxhardsigmoid-1"]], "OnnxHardSigmoid_6": [[64, "onnxhardsigmoid-6"]], "OnnxHardSwish": [[64, "onnxhardswish"]], "OnnxHardSwish_14": [[64, "onnxhardswish-14"]], "OnnxHardmax": [[64, "onnxhardmax"]], "OnnxHardmax_1": [[64, "onnxhardmax-1"]], "OnnxHardmax_11": [[64, "onnxhardmax-11"]], "OnnxHardmax_13": [[64, "onnxhardmax-13"]], "OnnxIdentity": [[64, "onnxidentity"]], "OnnxIdentity_1": [[64, "onnxidentity-1"]], "OnnxIdentity_13": [[64, "onnxidentity-13"]], "OnnxIdentity_14": [[64, "onnxidentity-14"]], "OnnxIdentity_16": [[64, "onnxidentity-16"]], "OnnxIf": [[64, "onnxif"]], "OnnxIf_1": [[64, "onnxif-1"]], "OnnxIf_11": [[64, "onnxif-11"]], "OnnxIf_13": [[64, "onnxif-13"]], "OnnxIf_16": [[64, "onnxif-16"]], "OnnxImputer": [[64, "onnximputer"]], "OnnxImputer_1": [[64, "onnximputer-1"]], "OnnxInstanceNormalization": [[64, "onnxinstancenormalization"]], "OnnxInstanceNormalization_1": [[64, "onnxinstancenormalization-1"]], "OnnxInstanceNormalization_6": [[64, "onnxinstancenormalization-6"]], "OnnxIsInf": [[64, "onnxisinf"]], "OnnxIsInf_10": [[64, "onnxisinf-10"]], "OnnxIsNaN": [[64, "onnxisnan"]], "OnnxIsNaN_13": [[64, "onnxisnan-13"]], "OnnxIsNaN_9": [[64, "onnxisnan-9"]], "OnnxLRN": [[64, "onnxlrn"]], "OnnxLRN_1": [[64, "onnxlrn-1"]], "OnnxLRN_13": [[64, "onnxlrn-13"]], "OnnxLSTM": [[64, "onnxlstm"]], "OnnxLSTM_1": [[64, "onnxlstm-1"]], "OnnxLSTM_14": [[64, "onnxlstm-14"]], "OnnxLSTM_7": [[64, "onnxlstm-7"]], "OnnxLabelEncoder": [[64, "onnxlabelencoder"]], "OnnxLabelEncoder_1": [[64, "onnxlabelencoder-1"]], "OnnxLabelEncoder_2": [[64, "onnxlabelencoder-2"]], "OnnxLayerNormalization": [[64, "onnxlayernormalization"]], "OnnxLayerNormalization_17": [[64, "onnxlayernormalization-17"]], "OnnxLeakyRelu": [[64, "onnxleakyrelu"]], "OnnxLeakyRelu_1": [[64, "onnxleakyrelu-1"]], "OnnxLeakyRelu_16": [[64, "onnxleakyrelu-16"]], "OnnxLeakyRelu_6": [[64, 
"onnxleakyrelu-6"]], "OnnxLess": [[64, "onnxless"]], "OnnxLessOrEqual": [[64, "onnxlessorequal"]], "OnnxLessOrEqual_12": [[64, "onnxlessorequal-12"]], "OnnxLessOrEqual_16": [[64, "onnxlessorequal-16"]], "OnnxLess_1": [[64, "onnxless-1"]], "OnnxLess_13": [[64, "onnxless-13"]], "OnnxLess_7": [[64, "onnxless-7"]], "OnnxLess_9": [[64, "onnxless-9"]], "OnnxLinearClassifier": [[64, "onnxlinearclassifier"]], "OnnxLinearClassifier_1": [[64, "onnxlinearclassifier-1"]], "OnnxLinearRegressor": [[64, "onnxlinearregressor"]], "OnnxLinearRegressor_1": [[64, "onnxlinearregressor-1"]], "OnnxLog": [[64, "onnxlog"]], "OnnxLogSoftmax": [[64, "onnxlogsoftmax"]], "OnnxLogSoftmax_1": [[64, "onnxlogsoftmax-1"]], "OnnxLogSoftmax_11": [[64, "onnxlogsoftmax-11"]], "OnnxLogSoftmax_13": [[64, "onnxlogsoftmax-13"]], "OnnxLog_1": [[64, "onnxlog-1"]], "OnnxLog_13": [[64, "onnxlog-13"]], "OnnxLog_6": [[64, "onnxlog-6"]], "OnnxLoop": [[64, "onnxloop"]], "OnnxLoop_1": [[64, "onnxloop-1"]], "OnnxLoop_11": [[64, "onnxloop-11"]], "OnnxLoop_13": [[64, "onnxloop-13"]], "OnnxLoop_16": [[64, "onnxloop-16"]], "OnnxLpNormalization": [[64, "onnxlpnormalization"]], "OnnxLpNormalization_1": [[64, "onnxlpnormalization-1"]], "OnnxLpPool": [[64, "onnxlppool"]], "OnnxLpPool_1": [[64, "onnxlppool-1"]], "OnnxLpPool_11": [[64, "onnxlppool-11"]], "OnnxLpPool_18": [[64, "onnxlppool-18"]], "OnnxLpPool_2": [[64, "onnxlppool-2"]], "OnnxMatMul": [[64, "onnxmatmul"]], "OnnxMatMulInteger": [[64, "onnxmatmulinteger"]], "OnnxMatMulInteger_10": [[64, "onnxmatmulinteger-10"]], "OnnxMatMul_1": [[64, "onnxmatmul-1"]], "OnnxMatMul_13": [[64, "onnxmatmul-13"]], "OnnxMatMul_9": [[64, "onnxmatmul-9"]], "OnnxMax": [[64, "onnxmax"]], "OnnxMaxPool": [[64, "onnxmaxpool"]], "OnnxMaxPool_1": [[64, "onnxmaxpool-1"]], "OnnxMaxPool_10": [[64, "onnxmaxpool-10"]], "OnnxMaxPool_11": [[64, "onnxmaxpool-11"]], "OnnxMaxPool_12": [[64, "onnxmaxpool-12"]], "OnnxMaxPool_8": [[64, "onnxmaxpool-8"]], "OnnxMaxRoiPool": [[64, "onnxmaxroipool"]], "OnnxMaxRoiPool_1": [[64, "onnxmaxroipool-1"]], "OnnxMaxUnpool": [[64, "onnxmaxunpool"]], "OnnxMaxUnpool_11": [[64, "onnxmaxunpool-11"]], "OnnxMaxUnpool_9": [[64, "onnxmaxunpool-9"]], "OnnxMax_1": [[64, "onnxmax-1"]], "OnnxMax_12": [[64, "onnxmax-12"]], "OnnxMax_13": [[64, "onnxmax-13"]], "OnnxMax_6": [[64, "onnxmax-6"]], "OnnxMax_8": [[64, "onnxmax-8"]], "OnnxMean": [[64, "onnxmean"]], "OnnxMeanVarianceNormalization": [[64, "onnxmeanvariancenormalization"]], "OnnxMeanVarianceNormalization_13": [[64, "onnxmeanvariancenormalization-13"]], "OnnxMeanVarianceNormalization_9": [[64, "onnxmeanvariancenormalization-9"]], "OnnxMean_1": [[64, "onnxmean-1"]], "OnnxMean_13": [[64, "onnxmean-13"]], "OnnxMean_6": [[64, "onnxmean-6"]], "OnnxMean_8": [[64, "onnxmean-8"]], "OnnxMelWeightMatrix": [[64, "onnxmelweightmatrix"]], "OnnxMelWeightMatrix_17": [[64, "onnxmelweightmatrix-17"]], "OnnxMin": [[64, "onnxmin"]], "OnnxMin_1": [[64, "onnxmin-1"]], "OnnxMin_12": [[64, "onnxmin-12"]], "OnnxMin_13": [[64, "onnxmin-13"]], "OnnxMin_6": [[64, "onnxmin-6"]], "OnnxMin_8": [[64, "onnxmin-8"]], "OnnxMish": [[64, "onnxmish"]], "OnnxMish_18": [[64, "onnxmish-18"]], "OnnxMod": [[64, "onnxmod"]], "OnnxMod_10": [[64, "onnxmod-10"]], "OnnxMod_13": [[64, "onnxmod-13"]], "OnnxMomentum": [[64, "onnxmomentum"]], "OnnxMomentum_1": [[64, "onnxmomentum-1"]], "OnnxMul": [[64, "onnxmul"]], "OnnxMul_1": [[64, "onnxmul-1"]], "OnnxMul_13": [[64, "onnxmul-13"]], "OnnxMul_14": [[64, "onnxmul-14"]], "OnnxMul_6": [[64, "onnxmul-6"]], "OnnxMul_7": [[64, "onnxmul-7"]], "OnnxMultinomial": 
[[64, "onnxmultinomial"]], "OnnxMultinomial_7": [[64, "onnxmultinomial-7"]], "OnnxNeg": [[64, "onnxneg"]], "OnnxNeg_1": [[64, "onnxneg-1"]], "OnnxNeg_13": [[64, "onnxneg-13"]], "OnnxNeg_6": [[64, "onnxneg-6"]], "OnnxNegativeLogLikelihoodLoss": [[64, "onnxnegativeloglikelihoodloss"]], "OnnxNegativeLogLikelihoodLoss_12": [[64, "onnxnegativeloglikelihoodloss-12"]], "OnnxNegativeLogLikelihoodLoss_13": [[64, "onnxnegativeloglikelihoodloss-13"]], "OnnxNonMaxSuppression": [[64, "onnxnonmaxsuppression"]], "OnnxNonMaxSuppression_10": [[64, "onnxnonmaxsuppression-10"]], "OnnxNonMaxSuppression_11": [[64, "onnxnonmaxsuppression-11"]], "OnnxNonZero": [[64, "onnxnonzero"]], "OnnxNonZero_13": [[64, "onnxnonzero-13"]], "OnnxNonZero_9": [[64, "onnxnonzero-9"]], "OnnxNormalizer": [[64, "onnxnormalizer"]], "OnnxNormalizer_1": [[64, "onnxnormalizer-1"]], "OnnxNot": [[64, "onnxnot"]], "OnnxNot_1": [[64, "onnxnot-1"]], "OnnxOneHot": [[64, "onnxonehot"]], "OnnxOneHotEncoder": [[64, "onnxonehotencoder"]], "OnnxOneHotEncoder_1": [[64, "onnxonehotencoder-1"]], "OnnxOneHot_11": [[64, "onnxonehot-11"]], "OnnxOneHot_9": [[64, "onnxonehot-9"]], "OnnxOptional": [[64, "onnxoptional"]], "OnnxOptionalGetElement": [[64, "onnxoptionalgetelement"]], "OnnxOptionalGetElement_15": [[64, "onnxoptionalgetelement-15"]], "OnnxOptionalGetElement_18": [[64, "onnxoptionalgetelement-18"]], "OnnxOptionalHasElement": [[64, "onnxoptionalhaselement"]], "OnnxOptionalHasElement_15": [[64, "onnxoptionalhaselement-15"]], "OnnxOptionalHasElement_18": [[64, "onnxoptionalhaselement-18"]], "OnnxOptional_15": [[64, "onnxoptional-15"]], "OnnxOr": [[64, "onnxor"]], "OnnxOr_1": [[64, "onnxor-1"]], "OnnxOr_7": [[64, "onnxor-7"]], "OnnxPRelu": [[64, "onnxprelu"]], "OnnxPRelu_1": [[64, "onnxprelu-1"]], "OnnxPRelu_16": [[64, "onnxprelu-16"]], "OnnxPRelu_6": [[64, "onnxprelu-6"]], "OnnxPRelu_7": [[64, "onnxprelu-7"]], "OnnxPRelu_9": [[64, "onnxprelu-9"]], "OnnxPad": [[64, "onnxpad"]], "OnnxPad_1": [[64, "onnxpad-1"]], "OnnxPad_11": [[64, "onnxpad-11"]], "OnnxPad_13": [[64, "onnxpad-13"]], "OnnxPad_18": [[64, "onnxpad-18"]], "OnnxPad_19": [[64, "onnxpad-19"]], "OnnxPad_2": [[64, "onnxpad-2"]], "OnnxPow": [[64, "onnxpow"]], "OnnxPow_1": [[64, "onnxpow-1"]], "OnnxPow_12": [[64, "onnxpow-12"]], "OnnxPow_13": [[64, "onnxpow-13"]], "OnnxPow_15": [[64, "onnxpow-15"]], "OnnxPow_7": [[64, "onnxpow-7"]], "OnnxQLinearConv": [[64, "onnxqlinearconv"]], "OnnxQLinearConv_10": [[64, "onnxqlinearconv-10"]], "OnnxQLinearMatMul": [[64, "onnxqlinearmatmul"]], "OnnxQLinearMatMul_10": [[64, "onnxqlinearmatmul-10"]], "OnnxQuantizeLinear": [[64, "onnxquantizelinear"]], "OnnxQuantizeLinear_10": [[64, "onnxquantizelinear-10"]], "OnnxQuantizeLinear_13": [[64, "onnxquantizelinear-13"]], "OnnxRNN": [[64, "onnxrnn"]], "OnnxRNN_1": [[64, "onnxrnn-1"]], "OnnxRNN_14": [[64, "onnxrnn-14"]], "OnnxRNN_7": [[64, "onnxrnn-7"]], "OnnxRandomNormal": [[64, "onnxrandomnormal"]], "OnnxRandomNormalLike": [[64, "onnxrandomnormallike"]], "OnnxRandomNormalLike_1": [[64, "onnxrandomnormallike-1"]], "OnnxRandomNormal_1": [[64, "onnxrandomnormal-1"]], "OnnxRandomUniform": [[64, "onnxrandomuniform"]], "OnnxRandomUniformLike": [[64, "onnxrandomuniformlike"]], "OnnxRandomUniformLike_1": [[64, "onnxrandomuniformlike-1"]], "OnnxRandomUniform_1": [[64, "onnxrandomuniform-1"]], "OnnxRange": [[64, "onnxrange"]], "OnnxRange_11": [[64, "onnxrange-11"]], "OnnxReciprocal": [[64, "onnxreciprocal"]], "OnnxReciprocal_1": [[64, "onnxreciprocal-1"]], "OnnxReciprocal_13": [[64, "onnxreciprocal-13"]], "OnnxReciprocal_6": 
[[64, "onnxreciprocal-6"]], "OnnxReduceL1": [[64, "onnxreducel1"]], "OnnxReduceL1_1": [[64, "onnxreducel1-1"]], "OnnxReduceL1_11": [[64, "onnxreducel1-11"]], "OnnxReduceL1_13": [[64, "onnxreducel1-13"]], "OnnxReduceL1_18": [[64, "onnxreducel1-18"]], "OnnxReduceL2": [[64, "onnxreducel2"]], "OnnxReduceL2_1": [[64, "onnxreducel2-1"]], "OnnxReduceL2_11": [[64, "onnxreducel2-11"]], "OnnxReduceL2_13": [[64, "onnxreducel2-13"]], "OnnxReduceL2_18": [[64, "onnxreducel2-18"]], "OnnxReduceLogSum": [[64, "onnxreducelogsum"]], "OnnxReduceLogSumExp": [[64, "onnxreducelogsumexp"]], "OnnxReduceLogSumExp_1": [[64, "onnxreducelogsumexp-1"]], "OnnxReduceLogSumExp_11": [[64, "onnxreducelogsumexp-11"]], "OnnxReduceLogSumExp_13": [[64, "onnxreducelogsumexp-13"]], "OnnxReduceLogSumExp_18": [[64, "onnxreducelogsumexp-18"]], "OnnxReduceLogSum_1": [[64, "onnxreducelogsum-1"]], "OnnxReduceLogSum_11": [[64, "onnxreducelogsum-11"]], "OnnxReduceLogSum_13": [[64, "onnxreducelogsum-13"]], "OnnxReduceLogSum_18": [[64, "onnxreducelogsum-18"]], "OnnxReduceMax": [[64, "onnxreducemax"]], "OnnxReduceMax_1": [[64, "onnxreducemax-1"]], "OnnxReduceMax_11": [[64, "onnxreducemax-11"]], "OnnxReduceMax_12": [[64, "onnxreducemax-12"]], "OnnxReduceMax_13": [[64, "onnxreducemax-13"]], "OnnxReduceMax_18": [[64, "onnxreducemax-18"]], "OnnxReduceMean": [[64, "onnxreducemean"]], "OnnxReduceMean_1": [[64, "onnxreducemean-1"]], "OnnxReduceMean_11": [[64, "onnxreducemean-11"]], "OnnxReduceMean_13": [[64, "onnxreducemean-13"]], "OnnxReduceMean_18": [[64, "onnxreducemean-18"]], "OnnxReduceMin": [[64, "onnxreducemin"]], "OnnxReduceMin_1": [[64, "onnxreducemin-1"]], "OnnxReduceMin_11": [[64, "onnxreducemin-11"]], "OnnxReduceMin_12": [[64, "onnxreducemin-12"]], "OnnxReduceMin_13": [[64, "onnxreducemin-13"]], "OnnxReduceMin_18": [[64, "onnxreducemin-18"]], "OnnxReduceProd": [[64, "onnxreduceprod"]], "OnnxReduceProd_1": [[64, "onnxreduceprod-1"]], "OnnxReduceProd_11": [[64, "onnxreduceprod-11"]], "OnnxReduceProd_13": [[64, "onnxreduceprod-13"]], "OnnxReduceProd_18": [[64, "onnxreduceprod-18"]], "OnnxReduceSum": [[64, "onnxreducesum"]], "OnnxReduceSumSquare": [[64, "onnxreducesumsquare"]], "OnnxReduceSumSquare_1": [[64, "onnxreducesumsquare-1"]], "OnnxReduceSumSquare_11": [[64, "onnxreducesumsquare-11"]], "OnnxReduceSumSquare_13": [[64, "onnxreducesumsquare-13"]], "OnnxReduceSumSquare_18": [[64, "onnxreducesumsquare-18"]], "OnnxReduceSum_1": [[64, "onnxreducesum-1"]], "OnnxReduceSum_11": [[64, "onnxreducesum-11"]], "OnnxReduceSum_13": [[64, "onnxreducesum-13"]], "OnnxRelu": [[64, "onnxrelu"]], "OnnxRelu_1": [[64, "onnxrelu-1"]], "OnnxRelu_13": [[64, "onnxrelu-13"]], "OnnxRelu_14": [[64, "onnxrelu-14"]], "OnnxRelu_6": [[64, "onnxrelu-6"]], "OnnxReshape": [[64, "onnxreshape"]], "OnnxReshape_1": [[64, "onnxreshape-1"]], "OnnxReshape_13": [[64, "onnxreshape-13"]], "OnnxReshape_14": [[64, "onnxreshape-14"]], "OnnxReshape_5": [[64, "onnxreshape-5"]], "OnnxResize": [[64, "onnxresize"]], "OnnxResize_10": [[64, "onnxresize-10"]], "OnnxResize_11": [[64, "onnxresize-11"]], "OnnxResize_13": [[64, "onnxresize-13"]], "OnnxResize_18": [[64, "onnxresize-18"]], "OnnxResize_19": [[64, "onnxresize-19"]], "OnnxReverseSequence": [[64, "onnxreversesequence"]], "OnnxReverseSequence_10": [[64, "onnxreversesequence-10"]], "OnnxRoiAlign": [[64, "onnxroialign"]], "OnnxRoiAlign_10": [[64, "onnxroialign-10"]], "OnnxRoiAlign_16": [[64, "onnxroialign-16"]], "OnnxRound": [[64, "onnxround"]], "OnnxRound_11": [[64, "onnxround-11"]], "OnnxSTFT": [[64, "onnxstft"]], "OnnxSTFT_17": [[64, 
"onnxstft-17"]], "OnnxSVMClassifier": [[64, "onnxsvmclassifier"]], "OnnxSVMClassifier_1": [[64, "onnxsvmclassifier-1"]], "OnnxSVMRegressor": [[64, "onnxsvmregressor"]], "OnnxSVMRegressor_1": [[64, "onnxsvmregressor-1"]], "OnnxScaler": [[64, "onnxscaler"]], "OnnxScaler_1": [[64, "onnxscaler-1"]], "OnnxScan": [[64, "onnxscan"]], "OnnxScan_11": [[64, "onnxscan-11"]], "OnnxScan_16": [[64, "onnxscan-16"]], "OnnxScan_8": [[64, "onnxscan-8"]], "OnnxScan_9": [[64, "onnxscan-9"]], "OnnxScatter": [[64, "onnxscatter"]], "OnnxScatterElements": [[64, "onnxscatterelements"]], "OnnxScatterElements_11": [[64, "onnxscatterelements-11"]], "OnnxScatterElements_13": [[64, "onnxscatterelements-13"]], "OnnxScatterElements_16": [[64, "onnxscatterelements-16"]], "OnnxScatterElements_18": [[64, "onnxscatterelements-18"]], "OnnxScatterND": [[64, "onnxscatternd"]], "OnnxScatterND_11": [[64, "onnxscatternd-11"]], "OnnxScatterND_13": [[64, "onnxscatternd-13"]], "OnnxScatterND_16": [[64, "onnxscatternd-16"]], "OnnxScatterND_18": [[64, "onnxscatternd-18"]], "OnnxScatter_11": [[64, "onnxscatter-11"]], "OnnxScatter_9": [[64, "onnxscatter-9"]], "OnnxSelu": [[64, "onnxselu"]], "OnnxSelu_1": [[64, "onnxselu-1"]], "OnnxSelu_6": [[64, "onnxselu-6"]], "OnnxSequenceAt": [[64, "onnxsequenceat"]], "OnnxSequenceAt_11": [[64, "onnxsequenceat-11"]], "OnnxSequenceConstruct": [[64, "onnxsequenceconstruct"]], "OnnxSequenceConstruct_11": [[64, "onnxsequenceconstruct-11"]], "OnnxSequenceEmpty": [[64, "onnxsequenceempty"]], "OnnxSequenceEmpty_11": [[64, "onnxsequenceempty-11"]], "OnnxSequenceErase": [[64, "onnxsequenceerase"]], "OnnxSequenceErase_11": [[64, "onnxsequenceerase-11"]], "OnnxSequenceInsert": [[64, "onnxsequenceinsert"]], "OnnxSequenceInsert_11": [[64, "onnxsequenceinsert-11"]], "OnnxSequenceLength": [[64, "onnxsequencelength"]], "OnnxSequenceLength_11": [[64, "onnxsequencelength-11"]], "OnnxSequenceMap": [[64, "onnxsequencemap"]], "OnnxSequenceMap_17": [[64, "onnxsequencemap-17"]], "OnnxShape": [[64, "onnxshape"]], "OnnxShape_1": [[64, "onnxshape-1"]], "OnnxShape_13": [[64, "onnxshape-13"]], "OnnxShape_15": [[64, "onnxshape-15"]], "OnnxShrink": [[64, "onnxshrink"]], "OnnxShrink_9": [[64, "onnxshrink-9"]], "OnnxSigmoid": [[64, "onnxsigmoid"]], "OnnxSigmoid_1": [[64, "onnxsigmoid-1"]], "OnnxSigmoid_13": [[64, "onnxsigmoid-13"]], "OnnxSigmoid_6": [[64, "onnxsigmoid-6"]], "OnnxSign": [[64, "onnxsign"]], "OnnxSign_13": [[64, "onnxsign-13"]], "OnnxSign_9": [[64, "onnxsign-9"]], "OnnxSin": [[64, "onnxsin"]], "OnnxSin_7": [[64, "onnxsin-7"]], "OnnxSinh": [[64, "onnxsinh"]], "OnnxSinh_9": [[64, "onnxsinh-9"]], "OnnxSize": [[64, "onnxsize"]], "OnnxSize_1": [[64, "onnxsize-1"]], "OnnxSize_13": [[64, "onnxsize-13"]], "OnnxSlice": [[64, "onnxslice"]], "OnnxSlice_1": [[64, "onnxslice-1"]], "OnnxSlice_10": [[64, "onnxslice-10"]], "OnnxSlice_11": [[64, "onnxslice-11"]], "OnnxSlice_13": [[64, "onnxslice-13"]], "OnnxSoftmax": [[64, "onnxsoftmax"]], "OnnxSoftmaxCrossEntropyLoss": [[64, "onnxsoftmaxcrossentropyloss"]], "OnnxSoftmaxCrossEntropyLoss_12": [[64, "onnxsoftmaxcrossentropyloss-12"]], "OnnxSoftmaxCrossEntropyLoss_13": [[64, "onnxsoftmaxcrossentropyloss-13"]], "OnnxSoftmax_1": [[64, "onnxsoftmax-1"]], "OnnxSoftmax_11": [[64, "onnxsoftmax-11"]], "OnnxSoftmax_13": [[64, "onnxsoftmax-13"]], "OnnxSoftplus": [[64, "onnxsoftplus"]], "OnnxSoftplus_1": [[64, "onnxsoftplus-1"]], "OnnxSoftsign": [[64, "onnxsoftsign"]], "OnnxSoftsign_1": [[64, "onnxsoftsign-1"]], "OnnxSpaceToDepth": [[64, "onnxspacetodepth"]], "OnnxSpaceToDepth_1": [[64, 
"onnxspacetodepth-1"]], "OnnxSpaceToDepth_13": [[64, "onnxspacetodepth-13"]], "OnnxSplit": [[64, "onnxsplit"]], "OnnxSplitToSequence": [[64, "onnxsplittosequence"]], "OnnxSplitToSequence_11": [[64, "onnxsplittosequence-11"]], "OnnxSplit_1": [[64, "onnxsplit-1"]], "OnnxSplit_11": [[64, "onnxsplit-11"]], "OnnxSplit_13": [[64, "onnxsplit-13"]], "OnnxSplit_18": [[64, "onnxsplit-18"]], "OnnxSplit_2": [[64, "onnxsplit-2"]], "OnnxSqrt": [[64, "onnxsqrt"]], "OnnxSqrt_1": [[64, "onnxsqrt-1"]], "OnnxSqrt_13": [[64, "onnxsqrt-13"]], "OnnxSqrt_6": [[64, "onnxsqrt-6"]], "OnnxSqueeze": [[64, "onnxsqueeze"]], "OnnxSqueeze_1": [[64, "onnxsqueeze-1"]], "OnnxSqueeze_11": [[64, "onnxsqueeze-11"]], "OnnxSqueeze_13": [[64, "onnxsqueeze-13"]], "OnnxStringNormalizer": [[64, "onnxstringnormalizer"]], "OnnxStringNormalizer_10": [[64, "onnxstringnormalizer-10"]], "OnnxSub": [[64, "onnxsub"]], "OnnxSub_1": [[64, "onnxsub-1"]], "OnnxSub_13": [[64, "onnxsub-13"]], "OnnxSub_14": [[64, "onnxsub-14"]], "OnnxSub_6": [[64, "onnxsub-6"]], "OnnxSub_7": [[64, "onnxsub-7"]], "OnnxSum": [[64, "onnxsum"]], "OnnxSum_1": [[64, "onnxsum-1"]], "OnnxSum_13": [[64, "onnxsum-13"]], "OnnxSum_6": [[64, "onnxsum-6"]], "OnnxSum_8": [[64, "onnxsum-8"]], "OnnxTan": [[64, "onnxtan"]], "OnnxTan_7": [[64, "onnxtan-7"]], "OnnxTanh": [[64, "onnxtanh"]], "OnnxTanh_1": [[64, "onnxtanh-1"]], "OnnxTanh_13": [[64, "onnxtanh-13"]], "OnnxTanh_6": [[64, "onnxtanh-6"]], "OnnxTfIdfVectorizer": [[64, "onnxtfidfvectorizer"]], "OnnxTfIdfVectorizer_9": [[64, "onnxtfidfvectorizer-9"]], "OnnxThresholdedRelu": [[64, "onnxthresholdedrelu"]], "OnnxThresholdedRelu_10": [[64, "onnxthresholdedrelu-10"]], "OnnxTile": [[64, "onnxtile"]], "OnnxTile_1": [[64, "onnxtile-1"]], "OnnxTile_13": [[64, "onnxtile-13"]], "OnnxTile_6": [[64, "onnxtile-6"]], "OnnxTopK": [[64, "onnxtopk"]], "OnnxTopK_1": [[64, "onnxtopk-1"]], "OnnxTopK_10": [[64, "onnxtopk-10"]], "OnnxTopK_11": [[64, "onnxtopk-11"]], "OnnxTranspose": [[64, "onnxtranspose"]], "OnnxTranspose_1": [[64, "onnxtranspose-1"]], "OnnxTranspose_13": [[64, "onnxtranspose-13"]], "OnnxTreeEnsembleClassifier": [[64, "onnxtreeensembleclassifier"]], "OnnxTreeEnsembleClassifier_1": [[64, "onnxtreeensembleclassifier-1"]], "OnnxTreeEnsembleClassifier_3": [[64, "onnxtreeensembleclassifier-3"]], "OnnxTreeEnsembleRegressor": [[64, "onnxtreeensembleregressor"]], "OnnxTreeEnsembleRegressor_1": [[64, "onnxtreeensembleregressor-1"]], "OnnxTreeEnsembleRegressor_3": [[64, "onnxtreeensembleregressor-3"]], "OnnxTrilu": [[64, "onnxtrilu"]], "OnnxTrilu_14": [[64, "onnxtrilu-14"]], "OnnxUnique": [[64, "onnxunique"]], "OnnxUnique_11": [[64, "onnxunique-11"]], "OnnxUnsqueeze": [[64, "onnxunsqueeze"]], "OnnxUnsqueeze_1": [[64, "onnxunsqueeze-1"]], "OnnxUnsqueeze_11": [[64, "onnxunsqueeze-11"]], "OnnxUnsqueeze_13": [[64, "onnxunsqueeze-13"]], "OnnxUpsample": [[64, "onnxupsample"]], "OnnxUpsample_10": [[64, "onnxupsample-10"]], "OnnxUpsample_7": [[64, "onnxupsample-7"]], "OnnxUpsample_9": [[64, "onnxupsample-9"]], "OnnxWhere": [[64, "onnxwhere"]], "OnnxWhere_16": [[64, "onnxwhere-16"]], "OnnxWhere_9": [[64, "onnxwhere-9"]], "OnnxXor": [[64, "onnxxor"]], "OnnxXor_1": [[64, "onnxxor-1"]], "OnnxXor_7": [[64, "onnxxor-7"]], "OnnxZipMap": [[64, "onnxzipmap"]], "OnnxZipMap_1": [[64, "onnxzipmap-1"]]}, "indexentries": {"modelcomponentcontainer (class in skl2onnx.common._container)": [[0, "skl2onnx.common._container.ModelComponentContainer"]], "operator (class in skl2onnx.common._topology)": [[0, "skl2onnx.common._topology.Operator"]], "scope (class in 
skl2onnx.common._topology)": [[0, "skl2onnx.common._topology.Scope"]], "sklearnmodelcontainernode (class in skl2onnx.common._container)": [[0, "skl2onnx.common._container.SklearnModelContainerNode"]], "topology (class in skl2onnx.common._topology)": [[0, "skl2onnx.common._topology.Topology"]], "variable (class in skl2onnx.common._topology)": [[0, "skl2onnx.common._topology.Variable"]], "add_initializer() (skl2onnx.common._container.modelcomponentcontainer method)": [[0, "skl2onnx.common._container.ModelComponentContainer.add_initializer"]], "add_input() (skl2onnx.common._container.modelcomponentcontainer method)": [[0, "skl2onnx.common._container.ModelComponentContainer.add_input"]], "add_node() (skl2onnx.common._container.modelcomponentcontainer method)": [[0, "skl2onnx.common._container.ModelComponentContainer.add_node"]], "add_output() (skl2onnx.common._container.modelcomponentcontainer method)": [[0, "skl2onnx.common._container.ModelComponentContainer.add_output"]], "check_input_and_output_numbers() (in module skl2onnx.common.utils)": [[0, "skl2onnx.common.utils.check_input_and_output_numbers"]], "check_input_and_output_types() (in module skl2onnx.common.utils)": [[0, "skl2onnx.common.utils.check_input_and_output_types"]], "convert_sklearn() (in module skl2onnx)": [[0, "skl2onnx.convert_sklearn"]], "convert_topology() (in module skl2onnx.common._topology)": [[0, "skl2onnx.common._topology.convert_topology"]], "enumerate_model_node_outputs() (in module skl2onnx.helpers.onnx_helper)": [[0, "skl2onnx.helpers.onnx_helper.enumerate_model_node_outputs"]], "get_latest_tested_opset_version() (in module skl2onnx)": [[0, "skl2onnx.get_latest_tested_opset_version"]], "get_unique_operator_name() (skl2onnx.common._topology.scope method)": [[0, "skl2onnx.common._topology.Scope.get_unique_operator_name"]], "get_unique_variable_name() (skl2onnx.common._topology.scope method)": [[0, "skl2onnx.common._topology.Scope.get_unique_variable_name"]], "input_names (skl2onnx.common._container.sklearnmodelcontainernode property)": [[0, "skl2onnx.common._container.SklearnModelContainerNode.input_names"]], "load_onnx_model() (in module skl2onnx.helpers.onnx_helper)": [[0, "skl2onnx.helpers.onnx_helper.load_onnx_model"]], "logging": [[0, "index-0"]], "output_names (skl2onnx.common._container.sklearnmodelcontainernode property)": [[0, "skl2onnx.common._container.SklearnModelContainerNode.output_names"]], "parse_sklearn() (in module skl2onnx._parse)": [[0, "skl2onnx._parse.parse_sklearn"]], "parse_sklearn_model() (in module skl2onnx._parse)": [[0, "skl2onnx._parse.parse_sklearn_model"]], "save_onnx_model() (in module skl2onnx.helpers.onnx_helper)": [[0, "skl2onnx.helpers.onnx_helper.save_onnx_model"]], "select_model_inputs_outputs() (in module skl2onnx.helpers.onnx_helper)": [[0, "skl2onnx.helpers.onnx_helper.select_model_inputs_outputs"]], "supported_converters() (in module skl2onnx)": [[0, "skl2onnx.supported_converters"]], "to_onnx() (in module skl2onnx)": [[0, "skl2onnx.to_onnx"]], "update_registered_converter() (in module skl2onnx)": [[0, "skl2onnx.update_registered_converter"]], "update_registered_parser() (in module skl2onnx)": [[0, "skl2onnx.update_registered_parser"]], "lightgbm": [[24, "index-0"], [43, "index-0"], [44, "index-0"], [55, "index-0"]], "xgboost": [[25, "index-0"], [45, "index-0"], [55, "index-0"]], "deployment": [[29, "index-0"]], "pipeline": [[29, "index-0"]], "benchmark": [[30, "index-0"]], "woe": [[31, "index-0"], [57, "index-0"]], "woeencoder": [[31, "index-0"]], "opset": [[32, "index-0"]], 
"target opset": [[32, "index-0"]], "version": [[32, "index-0"]], "options": [[33, "index-0"], [49, "index-0"]], "black list": [[34, "index-0"]], "white list": [[34, "index-0"]], "discrepencies": [[36, "index-0"]], "double": [[36, "index-0"]], "float": [[36, "index-0"]], "intermediate results": [[37, "index-0"]], "investigate": [[37, "index-0"]], "dataframe": [[39, "index-0"]], "deep learning": [[40, "index-0"]], "transfer learning": [[40, "index-0"]], "catboost": [[42, "index-0"]], "custom converter": [[46, "index-0"], [48, "index-0"]], "syntax": [[47, "index-0"]], "parser": [[50, "index-0"], [63, "index-0"]], "custom python runtime": [[52, "index-0"]], "td-idf": [[54, "index-0"]], "ensemble": [[55, "index-0"]], "sparse": [[55, "index-0"]], "iforest": [[56, "index-0"]], "pyod": [[56, "index-0"]], "woetransformer": [[57, "index-0"]], "opset version": [[59, "index-0"]], "target_opset": [[59, "index-0"]], "initial types": [[61, "index-0"]], "cdist": [[62, "index-0"]], "convert_sklearn_text_vectorizer() (in module skl2onnx.operator_converters.text_vectoriser)": [[62, "skl2onnx.operator_converters.text_vectoriser.convert_sklearn_text_vectorizer"]], "pairwise distances": [[62, "index-0"]], "converter": [[63, "index-0"]], "shape calculator": [[63, "index-0"]], "onnxabs (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAbs"]], "onnxabs_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAbs_1"]], "onnxabs_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAbs_13"]], "onnxabs_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAbs_6"]], "onnxacos (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAcos"]], "onnxacos_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAcos_7"]], "onnxacosh (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAcosh"]], "onnxacosh_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAcosh_9"]], "onnxadagrad (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAdagrad"]], "onnxadagrad_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAdagrad_1"]], "onnxadam (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAdam"]], "onnxadam_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAdam_1"]], "onnxadd (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAdd"]], "onnxadd_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAdd_1"]], "onnxadd_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAdd_13"]], "onnxadd_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAdd_14"]], "onnxadd_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAdd_6"]], "onnxadd_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAdd_7"]], "onnxand (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAnd"]], "onnxand_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAnd_1"]], "onnxand_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAnd_7"]], "onnxargmax (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxArgMax"]], "onnxargmax_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxArgMax_1"]], "onnxargmax_11 (class in 
skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxArgMax_11"]], "onnxargmax_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxArgMax_12"]], "onnxargmax_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxArgMax_13"]], "onnxargmin (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxArgMin"]], "onnxargmin_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxArgMin_1"]], "onnxargmin_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxArgMin_11"]], "onnxargmin_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxArgMin_12"]], "onnxargmin_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxArgMin_13"]], "onnxarrayfeatureextractor (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxArrayFeatureExtractor"]], "onnxarrayfeatureextractor_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxArrayFeatureExtractor_1"]], "onnxasin (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAsin"]], "onnxasin_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAsin_7"]], "onnxasinh (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAsinh"]], "onnxasinh_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAsinh_9"]], "onnxatan (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAtan"]], "onnxatan_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAtan_7"]], "onnxatanh (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAtanh"]], "onnxatanh_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAtanh_9"]], "onnxaveragepool (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAveragePool"]], "onnxaveragepool_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAveragePool_1"]], "onnxaveragepool_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAveragePool_10"]], "onnxaveragepool_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAveragePool_11"]], "onnxaveragepool_19 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAveragePool_19"]], "onnxaveragepool_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxAveragePool_7"]], "onnxbatchnormalization (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBatchNormalization"]], "onnxbatchnormalization_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_1"]], "onnxbatchnormalization_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_14"]], "onnxbatchnormalization_15 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_15"]], "onnxbatchnormalization_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_6"]], "onnxbatchnormalization_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_7"]], "onnxbatchnormalization_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_9"]], "onnxbernoulli (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBernoulli"]], "onnxbernoulli_15 (class in 
skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBernoulli_15"]], "onnxbinarizer (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBinarizer"]], "onnxbinarizer_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBinarizer_1"]], "onnxbitshift (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBitShift"]], "onnxbitshift_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBitShift_11"]], "onnxbitwiseand (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBitwiseAnd"]], "onnxbitwiseand_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBitwiseAnd_18"]], "onnxbitwisenot (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBitwiseNot"]], "onnxbitwisenot_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBitwiseNot_18"]], "onnxbitwiseor (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBitwiseOr"]], "onnxbitwiseor_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBitwiseOr_18"]], "onnxbitwisexor (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBitwiseXor"]], "onnxbitwisexor_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBitwiseXor_18"]], "onnxblackmanwindow (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBlackmanWindow"]], "onnxblackmanwindow_17 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxBlackmanWindow_17"]], "onnxcast (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCast"]], "onnxcastlike (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCastLike"]], "onnxcastlike_15 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCastLike_15"]], "onnxcastmap (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCastMap"]], "onnxcastmap_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCastMap_1"]], "onnxcastregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxCastRegressor"]], "onnxcasttransformer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxCastTransformer"]], "onnxcast_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCast_1"]], "onnxcast_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCast_13"]], "onnxcast_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCast_6"]], "onnxcast_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCast_9"]], "onnxcategorymapper (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCategoryMapper"]], "onnxcategorymapper_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCategoryMapper_1"]], "onnxceil (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCeil"]], "onnxceil_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCeil_1"]], "onnxceil_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCeil_13"]], "onnxceil_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCeil_6"]], "onnxcelu (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCelu"]], "onnxcelu_12 (class in skl2onnx.algebra.onnx_ops)": [[64, 
"skl2onnx.algebra.onnx_ops.OnnxCelu_12"]], "onnxcentercroppad (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCenterCropPad"]], "onnxcentercroppad_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCenterCropPad_18"]], "onnxclip (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxClip"]], "onnxclip_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxClip_1"]], "onnxclip_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxClip_11"]], "onnxclip_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxClip_12"]], "onnxclip_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxClip_13"]], "onnxclip_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxClip_6"]], "onnxcol2im (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCol2Im"]], "onnxcol2im_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCol2Im_18"]], "onnxcompress (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCompress"]], "onnxcompress_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCompress_11"]], "onnxcompress_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCompress_9"]], "onnxconcat (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConcat"]], "onnxconcatfromsequence (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConcatFromSequence"]], "onnxconcatfromsequence_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConcatFromSequence_11"]], "onnxconcat_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConcat_1"]], "onnxconcat_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConcat_11"]], "onnxconcat_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConcat_13"]], "onnxconcat_4 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConcat_4"]], "onnxconstant (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConstant"]], "onnxconstantofshape (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConstantOfShape"]], "onnxconstantofshape_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConstantOfShape_9"]], "onnxconstant_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConstant_1"]], "onnxconstant_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConstant_11"]], "onnxconstant_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConstant_12"]], "onnxconstant_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConstant_13"]], "onnxconstant_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConstant_9"]], "onnxconv (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConv"]], "onnxconvinteger (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConvInteger"]], "onnxconvinteger_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConvInteger_10"]], "onnxconvtranspose (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConvTranspose"]], "onnxconvtranspose_1 (class in skl2onnx.algebra.onnx_ops)": [[64, 
"skl2onnx.algebra.onnx_ops.OnnxConvTranspose_1"]], "onnxconvtranspose_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConvTranspose_11"]], "onnxconv_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConv_1"]], "onnxconv_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxConv_11"]], "onnxcos (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCos"]], "onnxcos_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCos_7"]], "onnxcosh (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCosh"]], "onnxcosh_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCosh_9"]], "onnxcumsum (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCumSum"]], "onnxcumsum_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCumSum_11"]], "onnxcumsum_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxCumSum_14"]], "onnxdft (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDFT"]], "onnxdft_17 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDFT_17"]], "onnxdepthtospace (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDepthToSpace"]], "onnxdepthtospace_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDepthToSpace_1"]], "onnxdepthtospace_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDepthToSpace_11"]], "onnxdepthtospace_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDepthToSpace_13"]], "onnxdequantizelinear (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDequantizeLinear"]], "onnxdequantizelinear_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDequantizeLinear_10"]], "onnxdequantizelinear_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDequantizeLinear_13"]], "onnxdet (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDet"]], "onnxdet_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDet_11"]], "onnxdictvectorizer (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDictVectorizer"]], "onnxdictvectorizer_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDictVectorizer_1"]], "onnxdiv (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDiv"]], "onnxdiv_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDiv_1"]], "onnxdiv_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDiv_13"]], "onnxdiv_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDiv_14"]], "onnxdiv_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDiv_6"]], "onnxdiv_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDiv_7"]], "onnxdropout (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDropout"]], "onnxdropout_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDropout_1"]], "onnxdropout_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDropout_10"]], "onnxdropout_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDropout_12"]], "onnxdropout_13 (class in 
skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDropout_13"]], "onnxdropout_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDropout_6"]], "onnxdropout_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDropout_7"]], "onnxdynamicquantizelinear (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDynamicQuantizeLinear"]], "onnxdynamicquantizelinear_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxDynamicQuantizeLinear_11"]], "onnxeinsum (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxEinsum"]], "onnxeinsum_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxEinsum_12"]], "onnxelu (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxElu"]], "onnxelu_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxElu_1"]], "onnxelu_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxElu_6"]], "onnxequal (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxEqual"]], "onnxequal_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxEqual_1"]], "onnxequal_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxEqual_11"]], "onnxequal_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxEqual_13"]], "onnxequal_19 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxEqual_19"]], "onnxequal_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxEqual_7"]], "onnxerf (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxErf"]], "onnxerf_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxErf_13"]], "onnxerf_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxErf_9"]], "onnxexp (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxExp"]], "onnxexp_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxExp_1"]], "onnxexp_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxExp_13"]], "onnxexp_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxExp_6"]], "onnxexpand (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxExpand"]], "onnxexpand_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxExpand_13"]], "onnxexpand_8 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxExpand_8"]], "onnxeyelike (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxEyeLike"]], "onnxeyelike_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxEyeLike_9"]], "onnxfeaturevectorizer (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxFeatureVectorizer"]], "onnxfeaturevectorizer_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxFeatureVectorizer_1"]], "onnxflatten (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxFlatten"]], "onnxflatten_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxFlatten_1"]], "onnxflatten_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxFlatten_11"]], "onnxflatten_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxFlatten_13"]], "onnxflatten_9 (class in 
skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxFlatten_9"]], "onnxfloor (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxFloor"]], "onnxfloor_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxFloor_1"]], "onnxfloor_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxFloor_13"]], "onnxfloor_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxFloor_6"]], "onnxgru (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGRU"]], "onnxgru_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGRU_1"]], "onnxgru_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGRU_14"]], "onnxgru_3 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGRU_3"]], "onnxgru_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGRU_7"]], "onnxgather (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGather"]], "onnxgatherelements (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGatherElements"]], "onnxgatherelements_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGatherElements_11"]], "onnxgatherelements_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGatherElements_13"]], "onnxgathernd (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGatherND"]], "onnxgathernd_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGatherND_11"]], "onnxgathernd_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGatherND_12"]], "onnxgathernd_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGatherND_13"]], "onnxgather_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGather_1"]], "onnxgather_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGather_11"]], "onnxgather_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGather_13"]], "onnxgemm (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGemm"]], "onnxgemm_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGemm_1"]], "onnxgemm_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGemm_11"]], "onnxgemm_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGemm_13"]], "onnxgemm_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGemm_6"]], "onnxgemm_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGemm_7"]], "onnxgemm_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGemm_9"]], "onnxglobalaveragepool (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGlobalAveragePool"]], "onnxglobalaveragepool_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGlobalAveragePool_1"]], "onnxgloballppool (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGlobalLpPool"]], "onnxgloballppool_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGlobalLpPool_1"]], "onnxgloballppool_2 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGlobalLpPool_2"]], "onnxglobalmaxpool (class in skl2onnx.algebra.onnx_ops)": [[64, 
"skl2onnx.algebra.onnx_ops.OnnxGlobalMaxPool"]], "onnxglobalmaxpool_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGlobalMaxPool_1"]], "onnxgradient (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGradient"]], "onnxgradient_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGradient_1"]], "onnxgreater (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGreater"]], "onnxgreaterorequal (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGreaterOrEqual"]], "onnxgreaterorequal_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGreaterOrEqual_12"]], "onnxgreaterorequal_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGreaterOrEqual_16"]], "onnxgreater_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGreater_1"]], "onnxgreater_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGreater_13"]], "onnxgreater_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGreater_7"]], "onnxgreater_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGreater_9"]], "onnxgridsample (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGridSample"]], "onnxgridsample_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGridSample_16"]], "onnxgroupnormalization (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGroupNormalization"]], "onnxgroupnormalization_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxGroupNormalization_18"]], "onnxhammingwindow (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHammingWindow"]], "onnxhammingwindow_17 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHammingWindow_17"]], "onnxhannwindow (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHannWindow"]], "onnxhannwindow_17 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHannWindow_17"]], "onnxhardsigmoid (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHardSigmoid"]], "onnxhardsigmoid_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHardSigmoid_1"]], "onnxhardsigmoid_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHardSigmoid_6"]], "onnxhardswish (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHardSwish"]], "onnxhardswish_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHardSwish_14"]], "onnxhardmax (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHardmax"]], "onnxhardmax_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHardmax_1"]], "onnxhardmax_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHardmax_11"]], "onnxhardmax_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxHardmax_13"]], "onnxidentity (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIdentity"]], "onnxidentity_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIdentity_1"]], "onnxidentity_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIdentity_13"]], "onnxidentity_14 (class in skl2onnx.algebra.onnx_ops)": [[64, 
"skl2onnx.algebra.onnx_ops.OnnxIdentity_14"]], "onnxidentity_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIdentity_16"]], "onnxif (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIf"]], "onnxif_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIf_1"]], "onnxif_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIf_11"]], "onnxif_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIf_13"]], "onnxif_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIf_16"]], "onnximputer (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxImputer"]], "onnximputer_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxImputer_1"]], "onnxinstancenormalization (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxInstanceNormalization"]], "onnxinstancenormalization_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxInstanceNormalization_1"]], "onnxinstancenormalization_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxInstanceNormalization_6"]], "onnxisinf (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIsInf"]], "onnxisinf_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIsInf_10"]], "onnxisnan (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIsNaN"]], "onnxisnan_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIsNaN_13"]], "onnxisnan_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxIsNaN_9"]], "onnxlrn (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLRN"]], "onnxlrn_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLRN_1"]], "onnxlrn_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLRN_13"]], "onnxlstm (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLSTM"]], "onnxlstm_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLSTM_1"]], "onnxlstm_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLSTM_14"]], "onnxlstm_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLSTM_7"]], "onnxlabelencoder (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLabelEncoder"]], "onnxlabelencoder_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLabelEncoder_1"]], "onnxlabelencoder_2 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLabelEncoder_2"]], "onnxlayernormalization (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLayerNormalization"]], "onnxlayernormalization_17 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLayerNormalization_17"]], "onnxleakyrelu (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLeakyRelu"]], "onnxleakyrelu_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLeakyRelu_1"]], "onnxleakyrelu_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLeakyRelu_16"]], "onnxleakyrelu_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLeakyRelu_6"]], "onnxless (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLess"]], 
"onnxlessorequal (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLessOrEqual"]], "onnxlessorequal_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLessOrEqual_12"]], "onnxlessorequal_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLessOrEqual_16"]], "onnxless_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLess_1"]], "onnxless_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLess_13"]], "onnxless_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLess_7"]], "onnxless_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLess_9"]], "onnxlinearclassifier (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLinearClassifier"]], "onnxlinearclassifier_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLinearClassifier_1"]], "onnxlinearregressor (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLinearRegressor"]], "onnxlinearregressor_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLinearRegressor_1"]], "onnxlog (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLog"]], "onnxlogsoftmax (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLogSoftmax"]], "onnxlogsoftmax_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLogSoftmax_1"]], "onnxlogsoftmax_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLogSoftmax_11"]], "onnxlogsoftmax_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLogSoftmax_13"]], "onnxlog_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLog_1"]], "onnxlog_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLog_13"]], "onnxlog_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLog_6"]], "onnxloop (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLoop"]], "onnxloop_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLoop_1"]], "onnxloop_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLoop_11"]], "onnxloop_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLoop_13"]], "onnxloop_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLoop_16"]], "onnxlpnormalization (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLpNormalization"]], "onnxlpnormalization_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLpNormalization_1"]], "onnxlppool (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLpPool"]], "onnxlppool_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLpPool_1"]], "onnxlppool_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLpPool_11"]], "onnxlppool_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLpPool_18"]], "onnxlppool_2 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxLpPool_2"]], "onnxmatmul (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMatMul"]], "onnxmatmulinteger (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMatMulInteger"]], "onnxmatmulinteger_10 (class in 
skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMatMulInteger_10"]], "onnxmatmul_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMatMul_1"]], "onnxmatmul_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMatMul_13"]], "onnxmatmul_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMatMul_9"]], "onnxmax (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMax"]], "onnxmaxpool (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMaxPool"]], "onnxmaxpool_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMaxPool_1"]], "onnxmaxpool_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMaxPool_10"]], "onnxmaxpool_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMaxPool_11"]], "onnxmaxpool_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMaxPool_12"]], "onnxmaxpool_8 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMaxPool_8"]], "onnxmaxroipool (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMaxRoiPool"]], "onnxmaxroipool_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMaxRoiPool_1"]], "onnxmaxunpool (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMaxUnpool"]], "onnxmaxunpool_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMaxUnpool_11"]], "onnxmaxunpool_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMaxUnpool_9"]], "onnxmax_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMax_1"]], "onnxmax_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMax_12"]], "onnxmax_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMax_13"]], "onnxmax_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMax_6"]], "onnxmax_8 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMax_8"]], "onnxmean (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMean"]], "onnxmeanvariancenormalization (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMeanVarianceNormalization"]], "onnxmeanvariancenormalization_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMeanVarianceNormalization_13"]], "onnxmeanvariancenormalization_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMeanVarianceNormalization_9"]], "onnxmean_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMean_1"]], "onnxmean_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMean_13"]], "onnxmean_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMean_6"]], "onnxmean_8 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMean_8"]], "onnxmelweightmatrix (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMelWeightMatrix"]], "onnxmelweightmatrix_17 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMelWeightMatrix_17"]], "onnxmin (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMin"]], "onnxmin_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMin_1"]], "onnxmin_12 (class in skl2onnx.algebra.onnx_ops)": 
[[64, "skl2onnx.algebra.onnx_ops.OnnxMin_12"]], "onnxmin_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMin_13"]], "onnxmin_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMin_6"]], "onnxmin_8 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMin_8"]], "onnxmish (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMish"]], "onnxmish_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMish_18"]], "onnxmod (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMod"]], "onnxmod_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMod_10"]], "onnxmod_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMod_13"]], "onnxmomentum (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMomentum"]], "onnxmomentum_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMomentum_1"]], "onnxmul (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMul"]], "onnxmul_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMul_1"]], "onnxmul_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMul_13"]], "onnxmul_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMul_14"]], "onnxmul_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMul_6"]], "onnxmul_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMul_7"]], "onnxmultinomial (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMultinomial"]], "onnxmultinomial_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxMultinomial_7"]], "onnxneg (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNeg"]], "onnxneg_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNeg_1"]], "onnxneg_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNeg_13"]], "onnxneg_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNeg_6"]], "onnxnegativeloglikelihoodloss (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNegativeLogLikelihoodLoss"]], "onnxnegativeloglikelihoodloss_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNegativeLogLikelihoodLoss_12"]], "onnxnegativeloglikelihoodloss_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNegativeLogLikelihoodLoss_13"]], "onnxnonmaxsuppression (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNonMaxSuppression"]], "onnxnonmaxsuppression_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNonMaxSuppression_10"]], "onnxnonmaxsuppression_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNonMaxSuppression_11"]], "onnxnonzero (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNonZero"]], "onnxnonzero_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNonZero_13"]], "onnxnonzero_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNonZero_9"]], "onnxnormalizer (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNormalizer"]], "onnxnormalizer_1 (class in skl2onnx.algebra.onnx_ops)": [[64, 
"skl2onnx.algebra.onnx_ops.OnnxNormalizer_1"]], "onnxnot (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNot"]], "onnxnot_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxNot_1"]], "onnxonehot (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOneHot"]], "onnxonehotencoder (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOneHotEncoder"]], "onnxonehotencoder_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOneHotEncoder_1"]], "onnxonehot_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOneHot_11"]], "onnxonehot_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOneHot_9"]], "onnxoptional (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOptional"]], "onnxoptionalgetelement (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOptionalGetElement"]], "onnxoptionalgetelement_15 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOptionalGetElement_15"]], "onnxoptionalgetelement_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOptionalGetElement_18"]], "onnxoptionalhaselement (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOptionalHasElement"]], "onnxoptionalhaselement_15 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOptionalHasElement_15"]], "onnxoptionalhaselement_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOptionalHasElement_18"]], "onnxoptional_15 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOptional_15"]], "onnxor (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOr"]], "onnxor_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOr_1"]], "onnxor_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxOr_7"]], "onnxprelu (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPRelu"]], "onnxprelu_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPRelu_1"]], "onnxprelu_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPRelu_16"]], "onnxprelu_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPRelu_6"]], "onnxprelu_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPRelu_7"]], "onnxprelu_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPRelu_9"]], "onnxpad (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPad"]], "onnxpad_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPad_1"]], "onnxpad_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPad_11"]], "onnxpad_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPad_13"]], "onnxpad_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPad_18"]], "onnxpad_19 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPad_19"]], "onnxpad_2 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPad_2"]], "onnxpow (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPow"]], "onnxpow_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPow_1"]], "onnxpow_12 (class in 
skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPow_12"]], "onnxpow_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPow_13"]], "onnxpow_15 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPow_15"]], "onnxpow_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxPow_7"]], "onnxqlinearconv (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxQLinearConv"]], "onnxqlinearconv_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxQLinearConv_10"]], "onnxqlinearmatmul (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxQLinearMatMul"]], "onnxqlinearmatmul_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxQLinearMatMul_10"]], "onnxquantizelinear (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxQuantizeLinear"]], "onnxquantizelinear_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxQuantizeLinear_10"]], "onnxquantizelinear_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxQuantizeLinear_13"]], "onnxrnn (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRNN"]], "onnxrnn_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRNN_1"]], "onnxrnn_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRNN_14"]], "onnxrnn_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRNN_7"]], "onnxrandomnormal (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRandomNormal"]], "onnxrandomnormallike (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRandomNormalLike"]], "onnxrandomnormallike_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRandomNormalLike_1"]], "onnxrandomnormal_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRandomNormal_1"]], "onnxrandomuniform (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRandomUniform"]], "onnxrandomuniformlike (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRandomUniformLike"]], "onnxrandomuniformlike_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRandomUniformLike_1"]], "onnxrandomuniform_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRandomUniform_1"]], "onnxrange (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRange"]], "onnxrange_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRange_11"]], "onnxreciprocal (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReciprocal"]], "onnxreciprocal_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReciprocal_1"]], "onnxreciprocal_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReciprocal_13"]], "onnxreciprocal_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReciprocal_6"]], "onnxreducel1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceL1"]], "onnxreducel1_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceL1_1"]], "onnxreducel1_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceL1_11"]], "onnxreducel1_13 (class in skl2onnx.algebra.onnx_ops)": [[64, 
"skl2onnx.algebra.onnx_ops.OnnxReduceL1_13"]], "onnxreducel1_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceL1_18"]], "onnxreducel2 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceL2"]], "onnxreducel2_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceL2_1"]], "onnxreducel2_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceL2_11"]], "onnxreducel2_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceL2_13"]], "onnxreducel2_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceL2_18"]], "onnxreducelogsum (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceLogSum"]], "onnxreducelogsumexp (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp"]], "onnxreducelogsumexp_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp_1"]], "onnxreducelogsumexp_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp_11"]], "onnxreducelogsumexp_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp_13"]], "onnxreducelogsumexp_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp_18"]], "onnxreducelogsum_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceLogSum_1"]], "onnxreducelogsum_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceLogSum_11"]], "onnxreducelogsum_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceLogSum_13"]], "onnxreducelogsum_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceLogSum_18"]], "onnxreducemax (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMax"]], "onnxreducemax_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMax_1"]], "onnxreducemax_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMax_11"]], "onnxreducemax_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMax_12"]], "onnxreducemax_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMax_13"]], "onnxreducemax_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMax_18"]], "onnxreducemean (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMean"]], "onnxreducemean_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMean_1"]], "onnxreducemean_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMean_11"]], "onnxreducemean_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMean_13"]], "onnxreducemean_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMean_18"]], "onnxreducemin (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMin"]], "onnxreducemin_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMin_1"]], "onnxreducemin_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMin_11"]], "onnxreducemin_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMin_12"]], "onnxreducemin_13 (class 
in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMin_13"]], "onnxreducemin_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceMin_18"]], "onnxreduceprod (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceProd"]], "onnxreduceprod_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceProd_1"]], "onnxreduceprod_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceProd_11"]], "onnxreduceprod_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceProd_13"]], "onnxreduceprod_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceProd_18"]], "onnxreducesum (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceSum"]], "onnxreducesumsquare (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare"]], "onnxreducesumsquare_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare_1"]], "onnxreducesumsquare_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare_11"]], "onnxreducesumsquare_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare_13"]], "onnxreducesumsquare_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare_18"]], "onnxreducesum_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceSum_1"]], "onnxreducesum_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceSum_11"]], "onnxreducesum_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReduceSum_13"]], "onnxrelu (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRelu"]], "onnxrelu_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRelu_1"]], "onnxrelu_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRelu_13"]], "onnxrelu_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRelu_14"]], "onnxrelu_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRelu_6"]], "onnxreplacetransformer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxReplaceTransformer"]], "onnxreshape (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReshape"]], "onnxreshape_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReshape_1"]], "onnxreshape_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReshape_13"]], "onnxreshape_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReshape_14"]], "onnxreshape_5 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReshape_5"]], "onnxresize (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxResize"]], "onnxresize_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxResize_10"]], "onnxresize_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxResize_11"]], "onnxresize_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxResize_13"]], "onnxresize_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxResize_18"]], "onnxresize_19 (class in skl2onnx.algebra.onnx_ops)": [[64, 
"skl2onnx.algebra.onnx_ops.OnnxResize_19"]], "onnxreversesequence (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReverseSequence"]], "onnxreversesequence_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxReverseSequence_10"]], "onnxroialign (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRoiAlign"]], "onnxroialign_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRoiAlign_10"]], "onnxroialign_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRoiAlign_16"]], "onnxround (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRound"]], "onnxround_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxRound_11"]], "onnxstft (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSTFT"]], "onnxstft_17 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSTFT_17"]], "onnxsvmclassifier (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSVMClassifier"]], "onnxsvmclassifier_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSVMClassifier_1"]], "onnxsvmregressor (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSVMRegressor"]], "onnxsvmregressor_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSVMRegressor_1"]], "onnxscaler (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScaler"]], "onnxscaler_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScaler_1"]], "onnxscan (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScan"]], "onnxscan_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScan_11"]], "onnxscan_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScan_16"]], "onnxscan_8 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScan_8"]], "onnxscan_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScan_9"]], "onnxscatter (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatter"]], "onnxscatterelements (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatterElements"]], "onnxscatterelements_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatterElements_11"]], "onnxscatterelements_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatterElements_13"]], "onnxscatterelements_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatterElements_16"]], "onnxscatterelements_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatterElements_18"]], "onnxscatternd (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatterND"]], "onnxscatternd_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatterND_11"]], "onnxscatternd_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatterND_13"]], "onnxscatternd_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatterND_16"]], "onnxscatternd_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatterND_18"]], "onnxscatter_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatter_11"]], "onnxscatter_9 (class in 
skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxScatter_9"]], "onnxselu (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSelu"]], "onnxselu_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSelu_1"]], "onnxselu_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSelu_6"]], "onnxsequenceat (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceAt"]], "onnxsequenceat_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceAt_11"]], "onnxsequenceconstruct (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceConstruct"]], "onnxsequenceconstruct_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceConstruct_11"]], "onnxsequenceempty (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceEmpty"]], "onnxsequenceempty_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceEmpty_11"]], "onnxsequenceerase (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceErase"]], "onnxsequenceerase_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceErase_11"]], "onnxsequenceinsert (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceInsert"]], "onnxsequenceinsert_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceInsert_11"]], "onnxsequencelength (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceLength"]], "onnxsequencelength_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceLength_11"]], "onnxsequencemap (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceMap"]], "onnxsequencemap_17 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSequenceMap_17"]], "onnxshape (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxShape"]], "onnxshape_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxShape_1"]], "onnxshape_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxShape_13"]], "onnxshape_15 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxShape_15"]], "onnxshrink (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxShrink"]], "onnxshrink_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxShrink_9"]], "onnxsigmoid (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSigmoid"]], "onnxsigmoid_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSigmoid_1"]], "onnxsigmoid_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSigmoid_13"]], "onnxsigmoid_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSigmoid_6"]], "onnxsign (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSign"]], "onnxsign_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSign_13"]], "onnxsign_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSign_9"]], "onnxsin (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSin"]], "onnxsin_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSin_7"]], "onnxsinh (class in 
skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSinh"]], "onnxsinh_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSinh_9"]], "onnxsize (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSize"]], "onnxsize_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSize_1"]], "onnxsize_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSize_13"]], "onnxsklearnardregression (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnARDRegression"]], "onnxsklearnadaboostclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnAdaBoostClassifier"]], "onnxsklearnadaboostregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnAdaBoostRegressor"]], "onnxsklearnbaggingclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnBaggingClassifier"]], "onnxsklearnbaggingregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnBaggingRegressor"]], "onnxsklearnbayesiangaussianmixture (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnBayesianGaussianMixture"]], "onnxsklearnbayesianridge (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnBayesianRidge"]], "onnxsklearnbernoullinb (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnBernoulliNB"]], "onnxsklearnbinarizer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnBinarizer"]], "onnxsklearncalibratedclassifiercv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnCalibratedClassifierCV"]], "onnxsklearncategoricalnb (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnCategoricalNB"]], "onnxsklearncolumntransformer (class in skl2onnx.algebra.sklearn_ops)": [[64, "id1"], [64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnColumnTransformer"]], "onnxsklearncomplementnb (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnComplementNB"]], "onnxsklearncountvectorizer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnCountVectorizer"]], "onnxsklearndecisiontreeclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnDecisionTreeClassifier"]], "onnxsklearndecisiontreeregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnDecisionTreeRegressor"]], "onnxsklearndictvectorizer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnDictVectorizer"]], "onnxsklearnelasticnet (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnElasticNet"]], "onnxsklearnelasticnetcv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnElasticNetCV"]], "onnxsklearnextratreeclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnExtraTreeClassifier"]], "onnxsklearnextratreeregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnExtraTreeRegressor"]], "onnxsklearnextratreesclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnExtraTreesClassifier"]], 
"onnxsklearnextratreesregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnExtraTreesRegressor"]], "onnxsklearnfeaturehasher (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureHasher"]], "onnxsklearnfeatureunion (class in skl2onnx.algebra.sklearn_ops)": [[64, "id2"], [64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureUnion"]], "onnxsklearnfunctiontransformer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnFunctionTransformer"]], "onnxsklearngammaregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnGammaRegressor"]], "onnxsklearngaussianmixture (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianMixture"]], "onnxsklearngaussiannb (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianNB"]], "onnxsklearngaussianprocessclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianProcessClassifier"]], "onnxsklearngaussianprocessregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianProcessRegressor"]], "onnxsklearngaussianrandomprojection (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianRandomProjection"]], "onnxsklearngenericunivariateselect (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnGenericUnivariateSelect"]], "onnxsklearngradientboostingclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnGradientBoostingClassifier"]], "onnxsklearngradientboostingregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnGradientBoostingRegressor"]], "onnxsklearngridsearchcv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnGridSearchCV"]], "onnxsklearnhistgradientboostingclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnHistGradientBoostingClassifier"]], "onnxsklearnhistgradientboostingregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnHistGradientBoostingRegressor"]], "onnxsklearnhuberregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnHuberRegressor"]], "onnxsklearnincrementalpca (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnIncrementalPCA"]], "onnxsklearnisolationforest (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnIsolationForest"]], "onnxsklearnkbinsdiscretizer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnKBinsDiscretizer"]], "onnxsklearnkmeans (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnKMeans"]], "onnxsklearnknnimputer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnKNNImputer"]], "onnxsklearnkneighborsclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnKNeighborsClassifier"]], "onnxsklearnkneighborsregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnKNeighborsRegressor"]], "onnxsklearnkneighborstransformer (class in skl2onnx.algebra.sklearn_ops)": [[64, 
"skl2onnx.algebra.sklearn_ops.OnnxSklearnKNeighborsTransformer"]], "onnxsklearnkernelcenterer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnKernelCenterer"]], "onnxsklearnkernelpca (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnKernelPCA"]], "onnxsklearnlabelbinarizer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLabelBinarizer"]], "onnxsklearnlabelencoder (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLabelEncoder"]], "onnxsklearnlars (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLars"]], "onnxsklearnlarscv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLarsCV"]], "onnxsklearnlasso (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLasso"]], "onnxsklearnlassocv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLassoCV"]], "onnxsklearnlassolars (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLassoLars"]], "onnxsklearnlassolarscv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLassoLarsCV"]], "onnxsklearnlassolarsic (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLassoLarsIC"]], "onnxsklearnlineardiscriminantanalysis (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLinearDiscriminantAnalysis"]], "onnxsklearnlinearregression (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLinearRegression"]], "onnxsklearnlinearsvc (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLinearSVC"]], "onnxsklearnlinearsvr (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLinearSVR"]], "onnxsklearnlocaloutlierfactor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLocalOutlierFactor"]], "onnxsklearnlogisticregression (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLogisticRegression"]], "onnxsklearnlogisticregressioncv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnLogisticRegressionCV"]], "onnxsklearnmlpclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnMLPClassifier"]], "onnxsklearnmlpregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnMLPRegressor"]], "onnxsklearnmaxabsscaler (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnMaxAbsScaler"]], "onnxsklearnminmaxscaler (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnMinMaxScaler"]], "onnxsklearnminibatchkmeans (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnMiniBatchKMeans"]], "onnxsklearnmultioutputclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiOutputClassifier"]], "onnxsklearnmultioutputregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiOutputRegressor"]], "onnxsklearnmultitaskelasticnet (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiTaskElasticNet"]], 
"onnxsklearnmultitaskelasticnetcv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiTaskElasticNetCV"]], "onnxsklearnmultitasklasso (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiTaskLasso"]], "onnxsklearnmultitasklassocv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiTaskLassoCV"]], "onnxsklearnmultinomialnb (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnMultinomialNB"]], "onnxsklearnnearestneighbors (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnNearestNeighbors"]], "onnxsklearnneighborhoodcomponentsanalysis (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnNeighborhoodComponentsAnalysis"]], "onnxsklearnnormalizer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnNormalizer"]], "onnxsklearnnusvc (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnNuSVC"]], "onnxsklearnnusvr (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnNuSVR"]], "onnxsklearnoneclasssvm (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnOneClassSVM"]], "onnxsklearnonehotencoder (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnOneHotEncoder"]], "onnxsklearnonevsoneclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnOneVsOneClassifier"]], "onnxsklearnonevsrestclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnOneVsRestClassifier"]], "onnxsklearnordinalencoder (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnOrdinalEncoder"]], "onnxsklearnorthogonalmatchingpursuit (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnOrthogonalMatchingPursuit"]], "onnxsklearnorthogonalmatchingpursuitcv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnOrthogonalMatchingPursuitCV"]], "onnxsklearnpca (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPCA"]], "onnxsklearnplsregression (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPLSRegression"]], "onnxsklearnpassiveaggressiveclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPassiveAggressiveClassifier"]], "onnxsklearnpassiveaggressiveregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPassiveAggressiveRegressor"]], "onnxsklearnperceptron (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPerceptron"]], "onnxsklearnpipeline (class in skl2onnx.algebra.sklearn_ops)": [[64, "id0"], [64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPipeline"]], "onnxsklearnpoissonregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPoissonRegressor"]], "onnxsklearnpolynomialfeatures (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPolynomialFeatures"]], "onnxsklearnpowertransformer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPowerTransformer"]], "onnxsklearnquadraticdiscriminantanalysis (class in 
skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnQuadraticDiscriminantAnalysis"]], "onnxsklearnquantileregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnQuantileRegressor"]], "onnxsklearnransacregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRANSACRegressor"]], "onnxsklearnrfe (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRFE"]], "onnxsklearnrfecv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRFECV"]], "onnxsklearnradiusneighborsclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRadiusNeighborsClassifier"]], "onnxsklearnradiusneighborsregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRadiusNeighborsRegressor"]], "onnxsklearnrandomforestclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRandomForestClassifier"]], "onnxsklearnrandomforestregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRandomForestRegressor"]], "onnxsklearnrandomtreesembedding (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRandomTreesEmbedding"]], "onnxsklearnridge (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRidge"]], "onnxsklearnridgecv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRidgeCV"]], "onnxsklearnridgeclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRidgeClassifier"]], "onnxsklearnridgeclassifiercv (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRidgeClassifierCV"]], "onnxsklearnrobustscaler (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnRobustScaler"]], "onnxsklearnsgdclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnSGDClassifier"]], "onnxsklearnsgdoneclasssvm (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnSGDOneClassSVM"]], "onnxsklearnsgdregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnSGDRegressor"]], "onnxsklearnsvc (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnSVC"]], "onnxsklearnsvr (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnSVR"]], "onnxsklearnselectfdr (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectFdr"]], "onnxsklearnselectfpr (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectFpr"]], "onnxsklearnselectfrommodel (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectFromModel"]], "onnxsklearnselectfwe (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectFwe"]], "onnxsklearnselectkbest (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectKBest"]], "onnxsklearnselectpercentile (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectPercentile"]], "onnxsklearnsimpleimputer (class in skl2onnx.algebra.sklearn_ops)": [[64, 
"skl2onnx.algebra.sklearn_ops.OnnxSklearnSimpleImputer"]], "onnxsklearnstackingclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnStackingClassifier"]], "onnxsklearnstackingregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnStackingRegressor"]], "onnxsklearnstandardscaler (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnStandardScaler"]], "onnxsklearntfidftransformer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnTfidfTransformer"]], "onnxsklearntfidfvectorizer (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnTfidfVectorizer"]], "onnxsklearntheilsenregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnTheilSenRegressor"]], "onnxsklearntruncatedsvd (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnTruncatedSVD"]], "onnxsklearntweedieregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnTweedieRegressor"]], "onnxsklearnvariancethreshold (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnVarianceThreshold"]], "onnxsklearnvotingclassifier (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnVotingClassifier"]], "onnxsklearnvotingregressor (class in skl2onnx.algebra.sklearn_ops)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnVotingRegressor"]], "onnxslice (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSlice"]], "onnxslice_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSlice_1"]], "onnxslice_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSlice_10"]], "onnxslice_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSlice_11"]], "onnxslice_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSlice_13"]], "onnxsoftmax (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSoftmax"]], "onnxsoftmaxcrossentropyloss (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSoftmaxCrossEntropyLoss"]], "onnxsoftmaxcrossentropyloss_12 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSoftmaxCrossEntropyLoss_12"]], "onnxsoftmaxcrossentropyloss_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSoftmaxCrossEntropyLoss_13"]], "onnxsoftmax_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSoftmax_1"]], "onnxsoftmax_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSoftmax_11"]], "onnxsoftmax_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSoftmax_13"]], "onnxsoftplus (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSoftplus"]], "onnxsoftplus_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSoftplus_1"]], "onnxsoftsign (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSoftsign"]], "onnxsoftsign_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSoftsign_1"]], "onnxspacetodepth (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSpaceToDepth"]], "onnxspacetodepth_1 (class in skl2onnx.algebra.onnx_ops)": [[64, 
"skl2onnx.algebra.onnx_ops.OnnxSpaceToDepth_1"]], "onnxspacetodepth_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSpaceToDepth_13"]], "onnxsplit (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSplit"]], "onnxsplittosequence (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSplitToSequence"]], "onnxsplittosequence_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSplitToSequence_11"]], "onnxsplit_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSplit_1"]], "onnxsplit_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSplit_11"]], "onnxsplit_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSplit_13"]], "onnxsplit_18 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSplit_18"]], "onnxsplit_2 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSplit_2"]], "onnxsqrt (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSqrt"]], "onnxsqrt_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSqrt_1"]], "onnxsqrt_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSqrt_13"]], "onnxsqrt_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSqrt_6"]], "onnxsqueeze (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSqueeze"]], "onnxsqueeze_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSqueeze_1"]], "onnxsqueeze_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSqueeze_11"]], "onnxsqueeze_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSqueeze_13"]], "onnxstringnormalizer (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxStringNormalizer"]], "onnxstringnormalizer_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxStringNormalizer_10"]], "onnxsub (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSub"]], "onnxsub_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSub_1"]], "onnxsub_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSub_13"]], "onnxsub_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSub_14"]], "onnxsub_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSub_6"]], "onnxsub_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSub_7"]], "onnxsum (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSum"]], "onnxsum_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSum_1"]], "onnxsum_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSum_13"]], "onnxsum_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSum_6"]], "onnxsum_8 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxSum_8"]], "onnxtan (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTan"]], "onnxtan_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTan_7"]], "onnxtanh (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTanh"]], "onnxtanh_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTanh_1"]], "onnxtanh_13 (class in 
skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTanh_13"]], "onnxtanh_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTanh_6"]], "onnxtfidfvectorizer (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTfIdfVectorizer"]], "onnxtfidfvectorizer_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTfIdfVectorizer_9"]], "onnxthresholdedrelu (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxThresholdedRelu"]], "onnxthresholdedrelu_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxThresholdedRelu_10"]], "onnxtile (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTile"]], "onnxtile_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTile_1"]], "onnxtile_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTile_13"]], "onnxtile_6 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTile_6"]], "onnxtopk (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTopK"]], "onnxtopk_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTopK_1"]], "onnxtopk_10 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTopK_10"]], "onnxtopk_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTopK_11"]], "onnxtranspose (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTranspose"]], "onnxtranspose_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTranspose_1"]], "onnxtranspose_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTranspose_13"]], "onnxtreeensembleclassifier (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleClassifier"]], "onnxtreeensembleclassifier_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleClassifier_1"]], "onnxtreeensembleclassifier_3 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleClassifier_3"]], "onnxtreeensembleregressor (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleRegressor"]], "onnxtreeensembleregressor_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleRegressor_1"]], "onnxtreeensembleregressor_3 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleRegressor_3"]], "onnxtrilu (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTrilu"]], "onnxtrilu_14 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxTrilu_14"]], "onnxunique (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxUnique"]], "onnxunique_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxUnique_11"]], "onnxunsqueeze (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxUnsqueeze"]], "onnxunsqueeze_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxUnsqueeze_1"]], "onnxunsqueeze_11 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxUnsqueeze_11"]], "onnxunsqueeze_13 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxUnsqueeze_13"]], "onnxupsample (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxUpsample"]], "onnxupsample_10 (class in 
skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxUpsample_10"]], "onnxupsample_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxUpsample_7"]], "onnxupsample_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxUpsample_9"]], "onnxwhere (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxWhere"]], "onnxwhere_16 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxWhere_16"]], "onnxwhere_9 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxWhere_9"]], "onnxxor (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxXor"]], "onnxxor_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxXor_1"]], "onnxxor_7 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxXor_7"]], "onnxzipmap (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxZipMap"]], "onnxzipmap_1 (class in skl2onnx.algebra.onnx_ops)": [[64, "skl2onnx.algebra.onnx_ops.OnnxZipMap_1"]], "onnx_converter() (skl2onnx.algebra.sklearn_ops.onnxsklearncolumntransformer method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnColumnTransformer.onnx_converter"]], "onnx_converter() (skl2onnx.algebra.sklearn_ops.onnxsklearnfeatureunion method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureUnion.onnx_converter"]], "onnx_converter() (skl2onnx.algebra.sklearn_ops.onnxsklearnpipeline method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPipeline.onnx_converter"]], "onnx_parser() (skl2onnx.algebra.sklearn_ops.onnxsklearncolumntransformer method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnColumnTransformer.onnx_parser"]], "onnx_parser() (skl2onnx.algebra.sklearn_ops.onnxsklearnfeatureunion method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureUnion.onnx_parser"]], "onnx_parser() (skl2onnx.algebra.sklearn_ops.onnxsklearnpipeline method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPipeline.onnx_parser"]], "onnx_shape_calculator() (skl2onnx.algebra.sklearn_ops.onnxsklearncolumntransformer method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnColumnTransformer.onnx_shape_calculator"]], "onnx_shape_calculator() (skl2onnx.algebra.sklearn_ops.onnxsklearnfeatureunion method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureUnion.onnx_shape_calculator"]], "onnx_shape_calculator() (skl2onnx.algebra.sklearn_ops.onnxsklearnpipeline method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPipeline.onnx_shape_calculator"]], "to_onnx() (skl2onnx.algebra.sklearn_ops.onnxsklearncolumntransformer method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnColumnTransformer.to_onnx"]], "to_onnx() (skl2onnx.algebra.sklearn_ops.onnxsklearnfeatureunion method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureUnion.to_onnx"]], "to_onnx() (skl2onnx.algebra.sklearn_ops.onnxsklearnpipeline method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPipeline.to_onnx"]], "to_onnx_operator() (skl2onnx.algebra.sklearn_ops.onnxsklearncolumntransformer method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnColumnTransformer.to_onnx_operator"]], "to_onnx_operator() (skl2onnx.algebra.sklearn_ops.onnxsklearnfeatureunion method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureUnion.to_onnx_operator"]], "to_onnx_operator() (skl2onnx.algebra.sklearn_ops.onnxsklearnpipeline method)": [[64, "skl2onnx.algebra.sklearn_ops.OnnxSklearnPipeline.to_onnx_operator"]]}}) \ No newline at end of file diff --git 
a/supported.html index 145fe33b8..85dce1481 100644 --- a/supported.html +++ b/supported.html @@ -1,32660 +1,36567 @@
Supported scikit-learn Models — sklearn-onnx 1.11.1 documentation

Supported scikit-learn Models#


skl2onnx can currently convert the following list of models. They were tested using onnxruntime. All of the following classes overload the same methods as OnnxSklearnPipeline does. They wrap existing scikit-learn classes by dynamically creating a new class which inherits from OnnxOperatorMixin and implements the to_onnx methods.
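
The snippet below is a minimal sketch of that conversion path, assuming a fitted estimator taken from the table that follows; the data and input handling are illustrative only:

    import numpy
    import onnxruntime as rt
    from sklearn.linear_model import LogisticRegression
    from skl2onnx import to_onnx

    # Any estimator marked "Yes" in the table below can be converted this way.
    X = numpy.random.rand(20, 3).astype(numpy.float32)
    y = (X.sum(axis=1) > 1.5).astype(numpy.int64)
    model = LogisticRegression(max_iter=500).fit(X, y)

    # The sample is only used to infer the input type and shape.
    onx = to_onnx(model, X[:1])

    # Check the converted model with onnxruntime.
    sess = rt.InferenceSession(onx.SerializeToString(),
                               providers=["CPUExecutionProvider"])
    pred = sess.run(None, {sess.get_inputs()[0].name: X})[0]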


Covered Converters#

Name | Package | Supported
ARDRegression | linear_model | Yes
AdaBoostClassifier | ensemble | Yes
AdaBoostRegressor | ensemble | Yes
AdditiveChi2Sampler | kernel_approximation |
AffinityPropagation | cluster |
AgglomerativeClustering | cluster |
BaggingClassifier | ensemble | Yes
BaggingRegressor | ensemble | Yes
BaseDecisionTree | tree |
BaseEnsemble | ensemble |
BayesianGaussianMixture | mixture | Yes
BayesianRidge | linear_model | Yes
BernoulliNB | naive_bayes | Yes
BernoulliRBM | neural_network |
Binarizer | preprocessing | Yes
Birch | cluster |
CCA | cross_decomposition |
CalibratedClassifierCV | calibration | Yes
CategoricalNB | naive_bayes | Yes
ClassifierChain | multioutput |
ComplementNB | naive_bayes | Yes
DBSCAN | cluster |
DecisionTreeClassifier | tree | Yes
DecisionTreeRegressor | tree | Yes
DictVectorizer | feature_extraction | Yes
DictionaryLearning | decomposition |
ElasticNet | linear_model | Yes
ElasticNetCV | linear_model | Yes
EllipticEnvelope | covariance |
EmpiricalCovariance | covariance |
ExtraTreeClassifier | tree | Yes
ExtraTreeRegressor | tree | Yes
ExtraTreesClassifier | ensemble | Yes
ExtraTreesRegressor | ensemble | Yes
FactorAnalysis | decomposition |
FastICA | decomposition |
FeatureAgglomeration | cluster |
FeatureHasher | feature_extraction |
FunctionTransformer | preprocessing | Yes
GammaRegressor | linear_model |
GaussianMixture | mixture | Yes
GaussianNB | naive_bayes | Yes
GaussianProcessClassifier | gaussian_process | Yes
GaussianProcessRegressor | gaussian_process | Yes
GaussianRandomProjection | random_projection | Yes
GenericUnivariateSelect | feature_selection | Yes
GradientBoostingClassifier | ensemble | Yes
GradientBoostingRegressor | ensemble | Yes
GraphicalLasso | covariance |
GraphicalLassoCV | covariance |
GridSearchCV | model_selection | Yes
HistGradientBoostingClassifier | ensemble | Yes
HistGradientBoostingRegressor | ensemble | Yes
HuberRegressor | linear_model | Yes
IncrementalPCA | decomposition | Yes
IsolationForest | ensemble | Yes
IsotonicRegression | isotonic |
KBinsDiscretizer | preprocessing | Yes
KMeans | cluster | Yes
KNNImputer | impute | Yes
KNeighborsClassifier | neighbors | Yes
KNeighborsRegressor | neighbors | Yes
KNeighborsTransformer | neighbors | Yes
KernelCenterer | preprocessing | Yes
KernelDensity | neighbors |
KernelPCA | decomposition | Yes
KernelRidge | kernel_ridge |
LabelBinarizer | preprocessing | Yes
LabelEncoder | preprocessing | Yes
LabelPropagation | semi_supervised |
LabelSpreading | semi_supervised |
Lars | linear_model | Yes
LarsCV | linear_model | Yes
Lasso | linear_model | Yes
LassoCV | linear_model | Yes
LassoLars | linear_model | Yes
LassoLarsCV | linear_model | Yes
LassoLarsIC | linear_model | Yes
LatentDirichletAllocation | decomposition |
LedoitWolf | covariance |
LinearDiscriminantAnalysis | discriminant_analysis | Yes
LinearRegression | linear_model | Yes
LinearSVC | svm | Yes
LinearSVR | svm | Yes
LocalOutlierFactor | neighbors | Yes
LogisticRegression | linear_model | Yes
LogisticRegressionCV | linear_model | Yes
MLPClassifier | neural_network | Yes
MLPRegressor | neural_network | Yes
MaxAbsScaler | preprocessing | Yes
MeanShift | cluster |
MinCovDet | covariance |
MinMaxScaler | preprocessing | Yes
MiniBatchDictionaryLearning | decomposition |
MiniBatchKMeans | cluster | Yes
MiniBatchSparsePCA | decomposition |
MissingIndicator | impute |
MultiLabelBinarizer | preprocessing |
MultiOutputClassifier | multioutput | Yes
MultiOutputRegressor | multioutput | Yes
MultiTaskElasticNet | linear_model | Yes
MultiTaskElasticNetCV | linear_model | Yes
MultiTaskLasso | linear_model | Yes
MultiTaskLassoCV | linear_model | Yes
MultinomialNB | naive_bayes | Yes
NMF | decomposition |
NearestCentroid | neighbors |
NearestNeighbors | neighbors | Yes
NeighborhoodComponentsAnalysis | neighbors | Yes
Normalizer | preprocessing | Yes
NuSVC | svm | Yes
NuSVR | svm | Yes
Nystroem | kernel_approximation |
OAS | covariance |
OPTICS | cluster |
OneClassSVM | svm | Yes
OneHotEncoder | preprocessing | Yes
OneVsOneClassifier | multiclass |
OneVsRestClassifier | multiclass | Yes
OrdinalEncoder | preprocessing | Yes
OrthogonalMatchingPursuit | linear_model | Yes
OrthogonalMatchingPursuitCV | linear_model | Yes
OutputCodeClassifier | multiclass |
PCA | decomposition | Yes
PLSCanonical | cross_decomposition |
PLSRegression | cross_decomposition | Yes
PLSSVD | cross_decomposition |
PassiveAggressiveClassifier | linear_model | Yes
PassiveAggressiveRegressor | linear_model | Yes
Perceptron | linear_model | Yes
PoissonRegressor | linear_model | Yes
PolynomialCountSketch | kernel_approximation |
PolynomialFeatures | preprocessing | Yes
PowerTransformer | preprocessing | Yes
QuadraticDiscriminantAnalysis | discriminant_analysis |
QuantileRegressor | linear_model | Yes
QuantileTransformer | preprocessing |
RANSACRegressor | linear_model | Yes
RBFSampler | kernel_approximation |
RFE | feature_selection | Yes
RFECV | feature_selection | Yes
RadiusNeighborsClassifier | neighbors | Yes
RadiusNeighborsRegressor | neighbors | Yes
RadiusNeighborsTransformer | neighbors |
RandomForestClassifier | ensemble | Yes
RandomForestRegressor | ensemble | Yes
RandomTreesEmbedding | ensemble | Yes
RandomizedSearchCV | model_selection |
RegressorChain | multioutput |
Ridge | linear_model | Yes
RidgeCV | linear_model | Yes
RidgeClassifier | linear_model | Yes
RidgeClassifierCV | linear_model | Yes
RobustScaler | preprocessing | Yes
SGDClassifier | linear_model | Yes
SGDOneClassSVM | linear_model |
SGDRegressor | linear_model | Yes
SVC | svm | Yes
SVR | svm | Yes
SelectFdr | feature_selection | Yes
SelectFpr | feature_selection | Yes
SelectFromModel | feature_selection | Yes
SelectFwe | feature_selection | Yes
SelectKBest | feature_selection | Yes
SelectPercentile | feature_selection | Yes
SelfTrainingClassifier | semi_supervised |
SequentialFeatureSelector | feature_selection |
ShrunkCovariance | covariance |
SimpleImputer | impute | Yes
SkewedChi2Sampler | kernel_approximation |
SparseCoder | decomposition |
SparsePCA | decomposition |
SparseRandomProjection | random_projection |
SpectralBiclustering | cluster |
SpectralClustering | cluster |
SpectralCoclustering | cluster |
SplineTransformer | preprocessing |
StackingClassifier | ensemble | Yes
StackingRegressor | ensemble | Yes
StandardScaler | preprocessing | Yes
TheilSenRegressor | linear_model | Yes
TransformedTargetRegressor | compose |
TruncatedSVD | decomposition | Yes
TweedieRegressor | linear_model | Yes
VarianceThreshold | feature_selection | Yes
VotingClassifier | ensemble | Yes
VotingRegressor | ensemble | Yes

The scikit-learn version is 1.0. 125 of the 187 models listed above are covered.
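
The same information can be queried programmatically; a minimal sketch, assuming the supported_converters helper exposed by skl2onnx in this release:

    from skl2onnx import supported_converters

    # Names of all registered converters.
    print(supported_converters())
    # Restrict the list to scikit-learn models only.
    print(supported_converters(from_sklearn=True))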


Converters Documentation#

OnnxBooster | OnnxSklearnKNNImputer | OnnxSklearnPerceptron
OnnxCastRegressor | OnnxSklearnKNeighborsClassifier | OnnxSklearnPipeline
OnnxCastTransformer | OnnxSklearnKNeighborsRegressor | OnnxSklearnPoissonRegressor
OnnxCustomScorerTransform | OnnxSklearnKNeighborsTransformer | OnnxSklearnPolynomialFeatures
OnnxDecorrelateTransformer | OnnxSklearnKernelCenterer | OnnxSklearnPowerTransformer
OnnxLiveDecorrelateTransformer | OnnxSklearnKernelPCA | OnnxSklearnQuantileRegressor
OnnxMockWrappedLightGbmBoosterClassifier | OnnxSklearnLGBMClassifier | OnnxSklearnRANSACRegressor
OnnxOrdinalEncoder | OnnxSklearnLGBMRegressor | OnnxSklearnRFE
OnnxPredictableTSNE | OnnxSklearnLabelBinarizer | OnnxSklearnRFECV
OnnxReplaceTransformer | OnnxSklearnLabelEncoder | OnnxSklearnRadiusNeighborsClassifier
OnnxSklearnARDRegression | OnnxSklearnLars | OnnxSklearnRadiusNeighborsRegressor
OnnxSklearnAdaBoostClassifier | OnnxSklearnLarsCV | OnnxSklearnRandomForestClassifier
OnnxSklearnAdaBoostRegressor | OnnxSklearnLasso | OnnxSklearnRandomForestRegressor
OnnxSklearnBaggingClassifier | OnnxSklearnLassoCV | OnnxSklearnRandomTreesEmbedding
OnnxSklearnBaggingRegressor | OnnxSklearnLassoLars | OnnxSklearnRidge
OnnxSklearnBayesianGaussianMixture | OnnxSklearnLassoLarsCV | OnnxSklearnRidgeCV
OnnxSklearnBayesianRidge | OnnxSklearnLassoLarsIC | OnnxSklearnRidgeClassifier
OnnxSklearnBernoulliNB | OnnxSklearnLinearDiscriminantAnalysis | OnnxSklearnRidgeClassifierCV
OnnxSklearnBinarizer | OnnxSklearnLinearRegression | OnnxSklearnRobustScaler
OnnxSklearnCalibratedClassifierCV | OnnxSklearnLinearSVC | OnnxSklearnSGDClassifier
OnnxSklearnCategoricalNB | OnnxSklearnLinearSVR | OnnxSklearnSGDRegressor
OnnxSklearnColumnTransformer | OnnxSklearnLocalOutlierFactor | OnnxSklearnSVC
OnnxSklearnComplementNB | OnnxSklearnLogisticRegression | OnnxSklearnSVR
OnnxSklearnCountVectorizer | OnnxSklearnLogisticRegressionCV | OnnxSklearnSelectFdr
OnnxSklearnDecisionTreeClassifier | OnnxSklearnMLPClassifier | OnnxSklearnSelectFpr
OnnxSklearnDecisionTreeRegressor | OnnxSklearnMLPRegressor | OnnxSklearnSelectFromModel
OnnxSklearnDictVectorizer | OnnxSklearnMaxAbsScaler | OnnxSklearnSelectFwe
OnnxSklearnElasticNet | OnnxSklearnMinMaxScaler | OnnxSklearnSelectKBest
OnnxSklearnElasticNetCV | OnnxSklearnMiniBatchKMeans | OnnxSklearnSelectPercentile
OnnxSklearnExtraTreeClassifier | OnnxSklearnMultiOutputClassifier | OnnxSklearnSimpleImputer
OnnxSklearnExtraTreeRegressor | OnnxSklearnMultiOutputRegressor | OnnxSklearnStackingClassifier
OnnxSklearnExtraTreesClassifier | OnnxSklearnMultiTaskElasticNet | OnnxSklearnStackingRegressor
OnnxSklearnExtraTreesRegressor | OnnxSklearnMultiTaskElasticNetCV | OnnxSklearnStandardScaler
OnnxSklearnFeatureUnion | OnnxSklearnMultiTaskLasso | OnnxSklearnTfidfTransformer
OnnxSklearnFunctionTransformer | OnnxSklearnMultiTaskLassoCV | OnnxSklearnTfidfVectorizer
OnnxSklearnGaussianMixture | OnnxSklearnMultinomialNB | OnnxSklearnTheilSenRegressor
OnnxSklearnGaussianNB | OnnxSklearnNearestNeighbors | OnnxSklearnTruncatedSVD
OnnxSklearnGaussianProcessClassifier | OnnxSklearnNeighborhoodComponentsAnalysis | OnnxSklearnTweedieRegressor
OnnxSklearnGaussianProcessRegressor | OnnxSklearnNormalizer | OnnxSklearnVarianceThreshold
OnnxSklearnGaussianRandomProjection | OnnxSklearnNuSVC | OnnxSklearnVotingClassifier
OnnxSklearnGenericUnivariateSelect | OnnxSklearnNuSVR | OnnxSklearnVotingRegressor
OnnxSklearnGradientBoostingClassifier | OnnxSklearnOneClassSVM | OnnxSklearnXGBClassifier
OnnxSklearnGradientBoostingRegressor | OnnxSklearnOneHotEncoder | OnnxSklearnXGBRegressor
OnnxSklearnGridSearchCV | OnnxSklearnOneVsRestClassifier | OnnxTransferTransformer
OnnxSklearnHistGradientBoostingClassifier | OnnxSklearnOrdinalEncoder | OnnxValidatorClassifier
OnnxSklearnHistGradientBoostingRegressor | OnnxSklearnOrthogonalMatchingPursuit | OnnxWOEEncoder
OnnxSklearnHuberRegressor | OnnxSklearnOrthogonalMatchingPursuitCV | OnnxWOETransformer
OnnxSklearnIncrementalPCA | OnnxSklearnPCA | OnnxWrappedLightGbmBooster
OnnxSklearnIsolationForest | OnnxSklearnPLSRegression | OnnxWrappedLightGbmBoosterClassifier
OnnxSklearnKBinsDiscretizer | OnnxSklearnPassiveAggressiveClassifier
OnnxSklearnKMeans | OnnxSklearnPassiveAggressiveRegressor

OnnxBooster#

OnnxCastRegressor#

-class skl2onnx.algebra.sklearn_ops.OnnxCastRegressor(estimator, *, dtype=<class 'numpy.float32'>)#

OnnxOperatorMixin for CastRegressor

OnnxCastTransformer#

-class skl2onnx.algebra.sklearn_ops.OnnxCastTransformer(*, dtype=<class 'numpy.float32'>)#

OnnxOperatorMixin for CastTransformer
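
A minimal sketch of where this transformer is typically used, assuming skl2onnx.sklapi.CastTransformer and a purely numerical pipeline; casting to float32 before the scaler keeps the scikit-learn computation close to the float32 ONNX graph:

    import numpy
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.tree import DecisionTreeRegressor
    from skl2onnx import to_onnx
    from skl2onnx.sklapi import CastTransformer

    X = numpy.random.rand(100, 4)
    y = X.sum(axis=1)

    # Cast the inputs before scaling so that scikit-learn and ONNX
    # both work in single precision.
    pipe = Pipeline([
        ("cast", CastTransformer(dtype=numpy.float32)),
        ("scaler", StandardScaler()),
        ("tree", DecisionTreeRegressor(max_depth=3)),
    ]).fit(X, y)

    onx = to_onnx(pipe, X[:1].astype(numpy.float32))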

OnnxCustomScorerTransform#

OnnxDecorrelateTransformer#

OnnxLiveDecorrelateTransformer#

OnnxMockWrappedLightGbmBoosterClassifier#

OnnxOrdinalEncoder#

OnnxPredictableTSNE#

OnnxReplaceTransformer#

-class skl2onnx.algebra.sklearn_ops.OnnxReplaceTransformer(*, from_value=0, to_value=nan, dtype=<class 'numpy.float32'>)#

OnnxOperatorMixin for ReplaceTransformer

OnnxSklearnARDRegression#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnARDRegression(*, n_iter=300, tol=0.001, alpha_1=1e-06, alpha_2=1e-06, lambda_1=1e-06, lambda_2=1e-06, compute_score=False, threshold_lambda=10000.0, fit_intercept=True, normalize='deprecated', copy_X=True, verbose=False)#

OnnxOperatorMixin for ARDRegression
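
These OnnxSklearn* classes behave like the scikit-learn estimators they mirror and add the conversion methods listed above. A minimal sketch, assuming the dynamically generated class can be imported directly and that the to_onnx method inherited from OnnxOperatorMixin accepts a sample array to infer the input type:

    import numpy
    from skl2onnx.algebra.sklearn_ops import OnnxSklearnARDRegression

    X = numpy.random.rand(50, 3).astype(numpy.float32)
    y = X.sum(axis=1)

    # Same constructor, fit and predict as sklearn.linear_model.ARDRegression.
    model = OnnxSklearnARDRegression().fit(X, y)

    # to_onnx comes from OnnxOperatorMixin.
    onx = model.to_onnx(X[:1])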

OnnxSklearnAdaBoostClassifier#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnAdaBoostClassifier(base_estimator=None, *, n_estimators=50, learning_rate=1.0, algorithm='SAMME.R', random_state=None)#

OnnxOperatorMixin for AdaBoostClassifier

OnnxSklearnAdaBoostRegressor#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnAdaBoostRegressor(base_estimator=None, *, n_estimators=50, learning_rate=1.0, loss='linear', random_state=None)#

OnnxOperatorMixin for AdaBoostRegressor

OnnxSklearnBaggingClassifier#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnBaggingClassifier(base_estimator=None, n_estimators=10, *, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=None, random_state=None, verbose=0)#

OnnxOperatorMixin for BaggingClassifier

OnnxSklearnBaggingRegressor#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnBaggingRegressor(base_estimator=None, n_estimators=10, *, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=None, random_state=None, verbose=0)#

OnnxOperatorMixin for BaggingRegressor

OnnxSklearnBayesianGaussianMixture#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnBayesianGaussianMixture(*, n_components=1, covariance_type='full', tol=0.001, reg_covar=1e-06, max_iter=100, n_init=1, init_params='kmeans', weight_concentration_prior_type='dirichlet_process', weight_concentration_prior=None, mean_precision_prior=None, mean_prior=None, degrees_of_freedom_prior=None, covariance_prior=None, random_state=None, warm_start=False, verbose=0, verbose_interval=10)#

OnnxOperatorMixin for BayesianGaussianMixture

OnnxSklearnBayesianRidge#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnBayesianRidge(*, n_iter=300, tol=0.001, alpha_1=1e-06, alpha_2=1e-06, lambda_1=1e-06, lambda_2=1e-06, alpha_init=None, lambda_init=None, compute_score=False, fit_intercept=True, normalize='deprecated', copy_X=True, verbose=False)#

OnnxOperatorMixin for BayesianRidge

OnnxSklearnBernoulliNB#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnBernoulliNB(*, alpha=1.0, binarize=0.0, fit_prior=True, class_prior=None)#

OnnxOperatorMixin for BernoulliNB

OnnxSklearnBinarizer#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnBinarizer(*, threshold=0.0, copy=True)#

OnnxOperatorMixin for Binarizer

OnnxSklearnCalibratedClassifierCV#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnCalibratedClassifierCV(base_estimator=None, *, method='sigmoid', cv=None, n_jobs=None, ensemble=True)#

OnnxOperatorMixin for CalibratedClassifierCV

OnnxSklearnCategoricalNB#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnCategoricalNB(*, alpha=1.0, fit_prior=True, class_prior=None, min_categories=None)#

OnnxOperatorMixin for CategoricalNB

OnnxSklearnColumnTransformer#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnColumnTransformer(transformers, *, remainder='drop', sparse_threshold=0.3, n_jobs=None, transformer_weights=None, verbose=False, verbose_feature_names_out=True)[source]#

OnnxOperatorMixin for ColumnTransformer
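
A minimal sketch of converting a pipeline built around a ColumnTransformer, assuming all columns are numerical so that a single float input can be inferred; with mixed string and numeric columns, explicit initial_types for every column are usually required:

    import numpy
    from sklearn.compose import ColumnTransformer
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import MinMaxScaler, StandardScaler
    from skl2onnx import to_onnx

    X = numpy.random.rand(40, 4).astype(numpy.float32)
    y = (X[:, 0] > 0.5).astype(numpy.int64)

    # Scale the first two columns and min-max the last two.
    prep = ColumnTransformer([
        ("std", StandardScaler(), [0, 1]),
        ("minmax", MinMaxScaler(), [2, 3]),
    ])
    pipe = Pipeline([("prep", prep), ("clf", LogisticRegression())]).fit(X, y)

    onx = to_onnx(pipe, X[:1])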

OnnxSklearnComplementNB#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnComplementNB(*, alpha=1.0, fit_prior=True, class_prior=None, norm=False)#

OnnxOperatorMixin for ComplementNB

OnnxSklearnCountVectorizer#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnCountVectorizer(*, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern='(?u)\\b\\w\\w+\\b', ngram_range=(1, 1), analyzer='word', max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=<class 'numpy.int64'>)#

OnnxOperatorMixin for CountVectorizer

OnnxSklearnDecisionTreeClassifier#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnDecisionTreeClassifier(*, criterion='gini', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, class_weight=None, ccp_alpha=0.0)#

OnnxOperatorMixin for DecisionTreeClassifier
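
By default, converted scikit-learn classifiers return probabilities as a sequence of dictionaries (a ZipMap node); the zipmap option documented for classifier converters switches this to a plain matrix. A minimal sketch:

    import numpy
    from sklearn.tree import DecisionTreeClassifier
    from skl2onnx import to_onnx

    X = numpy.random.rand(60, 3).astype(numpy.float32)
    y = (X[:, 0] + X[:, 1] > 1).astype(numpy.int64)
    clf = DecisionTreeClassifier(max_depth=3).fit(X, y)

    # Disable the ZipMap node so probabilities come out as a 2D tensor.
    onx = to_onnx(clf, X[:1], options={id(clf): {"zipmap": False}})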

OnnxSklearnDecisionTreeRegressor#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnDecisionTreeRegressor(*, criterion='squared_error', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, ccp_alpha=0.0)#

OnnxOperatorMixin for DecisionTreeRegressor

OnnxSklearnDictVectorizer#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnDictVectorizer(*, dtype=<class 'numpy.float64'>, separator='=', sparse=True, sort=True)#

OnnxOperatorMixin for DictVectorizer

OnnxSklearnElasticNet#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnElasticNet(alpha=1.0, *, l1_ratio=0.5, fit_intercept=True, normalize='deprecated', precompute=False, max_iter=1000, copy_X=True, tol=0.0001, warm_start=False, positive=False, random_state=None, selection='cyclic')#

OnnxOperatorMixin for ElasticNet

OnnxSklearnElasticNetCV#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnElasticNetCV(*, l1_ratio=0.5, eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize='deprecated', precompute='auto', max_iter=1000, tol=0.0001, cv=None, copy_X=True, verbose=0, n_jobs=None, positive=False, random_state=None, selection='cyclic')#

OnnxOperatorMixin for ElasticNetCV

OnnxSklearnExtraTreeClassifier#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnExtraTreeClassifier(*, criterion='gini', splitter='random', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, class_weight=None, ccp_alpha=0.0)#

OnnxOperatorMixin for ExtraTreeClassifier

OnnxSklearnExtraTreeRegressor#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnExtraTreeRegressor(*, criterion='squared_error', splitter='random', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', random_state=None, min_impurity_decrease=0.0, max_leaf_nodes=None, ccp_alpha=0.0)#

OnnxOperatorMixin for ExtraTreeRegressor

OnnxSklearnExtraTreesClassifier#

-class skl2onnx.algebra.sklearn_ops.OnnxSklearnExtraTreesClassifier(n_estimators=100, *, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, bootstrap=False, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, class_weight=None, ccp_alpha=0.0, max_samples=None)#
-

OnnxOperatorMixin for ExtraTreesClassifier

-
- -
-
-
-
-

OnnxSklearnExtraTreesRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnExtraTreesRegressor(n_estimators=100, *, criterion='squared_error', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, bootstrap=False, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, ccp_alpha=0.0, max_samples=None)#
-

OnnxOperatorMixin for ExtraTreesRegressor

-
- -
-
-
-
-

OnnxSklearnFeatureUnion#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureUnion(transformer_list, *, n_jobs=None, transformer_weights=None, verbose=False)[source]#
-

OnnxOperatorMixin for FeatureUnion

-
- -
-
-
-
-

OnnxSklearnFunctionTransformer#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnFunctionTransformer(func=None, inverse_func=None, *, validate=False, accept_sparse=False, check_inverse=True, kw_args=None, inv_kw_args=None)#
-

OnnxOperatorMixin for FunctionTransformer

-
- -
-
-
-
-

OnnxSklearnGaussianMixture#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianMixture(n_components=1, *, covariance_type='full', tol=0.001, reg_covar=1e-06, max_iter=100, n_init=1, init_params='kmeans', weights_init=None, means_init=None, precisions_init=None, random_state=None, warm_start=False, verbose=0, verbose_interval=10)#
-

OnnxOperatorMixin for GaussianMixture

-
- -
-
-
-
-

OnnxSklearnGaussianNB#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianNB(*, priors=None, var_smoothing=1e-09)#
-

OnnxOperatorMixin for GaussianNB

-
- -
-
-
-
-

OnnxSklearnGaussianProcessClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianProcessClassifier(kernel=None, *, optimizer='fmin_l_bfgs_b', n_restarts_optimizer=0, max_iter_predict=100, warm_start=False, copy_X_train=True, random_state=None, multi_class='one_vs_rest', n_jobs=None)#
-

OnnxOperatorMixin for GaussianProcessClassifier

-
- -
-
-
-
-

OnnxSklearnGaussianProcessRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianProcessRegressor(kernel=None, *, alpha=1e-10, optimizer='fmin_l_bfgs_b', n_restarts_optimizer=0, normalize_y=False, copy_X_train=True, random_state=None)#
-

OnnxOperatorMixin for GaussianProcessRegressor

-
- -
-
-
-
-

OnnxSklearnGaussianRandomProjection#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianRandomProjection(n_components='auto', *, eps=0.1, random_state=None)#
-

OnnxOperatorMixin for GaussianRandomProjection

-
- -
-
-
-
-

OnnxSklearnGenericUnivariateSelect#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnGenericUnivariateSelect(score_func=<function f_classif>, *, mode='percentile', param=1e-05)#
-

OnnxOperatorMixin for GenericUnivariateSelect

-
- -
-
-
-
-

OnnxSklearnGradientBoostingClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnGradientBoostingClassifier(*, loss='deviance', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, min_impurity_decrease=0.0, init=None, random_state=None, max_features=None, verbose=0, max_leaf_nodes=None, warm_start=False, validation_fraction=0.1, n_iter_no_change=None, tol=0.0001, ccp_alpha=0.0)#
-

OnnxOperatorMixin for GradientBoostingClassifier

-
- -
-
-
-
-

OnnxSklearnGradientBoostingRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnGradientBoostingRegressor(*, loss='squared_error', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, min_impurity_decrease=0.0, init=None, random_state=None, max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False, validation_fraction=0.1, n_iter_no_change=None, tol=0.0001, ccp_alpha=0.0)#
-

OnnxOperatorMixin for GradientBoostingRegressor

-
- -
-
-
-
-

OnnxSklearnGridSearchCV#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnGridSearchCV(estimator, param_grid, *, scoring=None, n_jobs=None, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score=nan, return_train_score=False)#
-

OnnxOperatorMixin for GridSearchCV

-
- -
-
-
-
-

OnnxSklearnHistGradientBoostingClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnHistGradientBoostingClassifier(loss='auto', *, learning_rate=0.1, max_iter=100, max_leaf_nodes=31, max_depth=None, min_samples_leaf=20, l2_regularization=0.0, max_bins=255, categorical_features=None, monotonic_cst=None, warm_start=False, early_stopping='auto', scoring='loss', validation_fraction=0.1, n_iter_no_change=10, tol=1e-07, verbose=0, random_state=None)#
-

OnnxOperatorMixin for HistGradientBoostingClassifier

-
- -
-
-
-
-

OnnxSklearnHistGradientBoostingRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnHistGradientBoostingRegressor(loss='squared_error', *, learning_rate=0.1, max_iter=100, max_leaf_nodes=31, max_depth=None, min_samples_leaf=20, l2_regularization=0.0, max_bins=255, categorical_features=None, monotonic_cst=None, warm_start=False, early_stopping='auto', scoring='loss', validation_fraction=0.1, n_iter_no_change=10, tol=1e-07, verbose=0, random_state=None)#
-

OnnxOperatorMixin for HistGradientBoostingRegressor

-
- -
-
-
-
-

OnnxSklearnHuberRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnHuberRegressor(*, epsilon=1.35, max_iter=100, alpha=0.0001, warm_start=False, fit_intercept=True, tol=1e-05)#
-

OnnxOperatorMixin for HuberRegressor

-
- -
-
-
-
-

OnnxSklearnIncrementalPCA#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnIncrementalPCA(n_components=None, *, whiten=False, copy=True, batch_size=None)#
-

OnnxOperatorMixin for IncrementalPCA

-
- -
-
-
-
-

OnnxSklearnIsolationForest#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnIsolationForest(*, n_estimators=100, max_samples='auto', contamination='auto', max_features=1.0, bootstrap=False, n_jobs=None, random_state=None, verbose=0, warm_start=False)#
-

OnnxOperatorMixin for IsolationForest

-
- -
-
-
-
-

OnnxSklearnKBinsDiscretizer#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnKBinsDiscretizer(n_bins=5, *, encode='onehot', strategy='quantile', dtype=None)#
-

OnnxOperatorMixin for KBinsDiscretizer

-
- -
-
-
-
-

OnnxSklearnKMeans#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnKMeans(n_clusters=8, *, init='k-means++', n_init=10, max_iter=300, tol=0.0001, verbose=0, random_state=None, copy_x=True, algorithm='auto')#
-

OnnxOperatorMixin for KMeans

-
- -
-
-
-
-

OnnxSklearnKNNImputer#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnKNNImputer(*, missing_values=nan, n_neighbors=5, weights='uniform', metric='nan_euclidean', copy=True, add_indicator=False)#
-

OnnxOperatorMixin for KNNImputer

-
- -
-
-
-
-

OnnxSklearnKNeighborsClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnKNeighborsClassifier(n_neighbors=5, *, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None)#
-

OnnxOperatorMixin for KNeighborsClassifier

-
- -
-
-
-
-

OnnxSklearnKNeighborsRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnKNeighborsRegressor(n_neighbors=5, *, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None)#
-

OnnxOperatorMixin for KNeighborsRegressor

-
- -
-
-
-
-

OnnxSklearnKNeighborsTransformer#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnKNeighborsTransformer(*, mode='distance', n_neighbors=5, algorithm='auto', leaf_size=30, metric='minkowski', p=2, metric_params=None, n_jobs=1)#
-

OnnxOperatorMixin for KNeighborsTransformer

-
- -
-
-
-
-

OnnxSklearnKernelCenterer#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnKernelCenterer#
-

OnnxOperatorMixin for KernelCenterer

-
- -
-
-
-
-

OnnxSklearnKernelPCA#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnKernelPCA(n_components=None, *, kernel='linear', gamma=None, degree=3, coef0=1, kernel_params=None, alpha=1.0, fit_inverse_transform=False, eigen_solver='auto', tol=0, max_iter=None, iterated_power='auto', remove_zero_eig=False, random_state=None, copy_X=True, n_jobs=None)#
-

OnnxOperatorMixin for KernelPCA

-
- -
-
-
-
-

OnnxSklearnLGBMClassifier#


OnnxSklearnLGBMRegressor#


OnnxSklearnLabelBinarizer#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLabelBinarizer(*, neg_label=0, pos_label=1, sparse_output=False)#
-

OnnxOperatorMixin for LabelBinarizer

-
- -
-
-
-
-

OnnxSklearnLabelEncoder#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLabelEncoder#
-

OnnxOperatorMixin for LabelEncoder

-
- -
-
-
-
-

OnnxSklearnLars#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLars(*, fit_intercept=True, verbose=False, normalize='deprecated', precompute='auto', n_nonzero_coefs=500, eps=2.220446049250313e-16, copy_X=True, fit_path=True, jitter=None, random_state=None)#
-

OnnxOperatorMixin for Lars

-
- -
-
-
-
-

OnnxSklearnLarsCV#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLarsCV(*, fit_intercept=True, verbose=False, max_iter=500, normalize='deprecated', precompute='auto', cv=None, max_n_alphas=1000, n_jobs=None, eps=2.220446049250313e-16, copy_X=True)#
-

OnnxOperatorMixin for LarsCV

-
- -
-
-
-
-

OnnxSklearnLasso#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLasso(alpha=1.0, *, fit_intercept=True, normalize='deprecated', precompute=False, copy_X=True, max_iter=1000, tol=0.0001, warm_start=False, positive=False, random_state=None, selection='cyclic')#
-

OnnxOperatorMixin for Lasso

-
- -
-
-
-
-

OnnxSklearnLassoCV#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLassoCV(*, eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize='deprecated', precompute='auto', max_iter=1000, tol=0.0001, copy_X=True, cv=None, verbose=False, n_jobs=None, positive=False, random_state=None, selection='cyclic')#
-

OnnxOperatorMixin for LassoCV

-
- -
-
-
-
-

OnnxSklearnLassoLars#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLassoLars(alpha=1.0, *, fit_intercept=True, verbose=False, normalize='deprecated', precompute='auto', max_iter=500, eps=2.220446049250313e-16, copy_X=True, fit_path=True, positive=False, jitter=None, random_state=None)#
-

OnnxOperatorMixin for LassoLars

-
- -
-
-
-
-

OnnxSklearnLassoLarsCV#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLassoLarsCV(*, fit_intercept=True, verbose=False, max_iter=500, normalize='deprecated', precompute='auto', cv=None, max_n_alphas=1000, n_jobs=None, eps=2.220446049250313e-16, copy_X=True, positive=False)#
-

OnnxOperatorMixin for LassoLarsCV

-
- -
-
-
-
-

OnnxSklearnLassoLarsIC#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLassoLarsIC(criterion='aic', *, fit_intercept=True, verbose=False, normalize='deprecated', precompute='auto', max_iter=500, eps=2.220446049250313e-16, copy_X=True, positive=False)#
-

OnnxOperatorMixin for LassoLarsIC

-
- -
-
-
-
-

OnnxSklearnLinearDiscriminantAnalysis#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLinearDiscriminantAnalysis(solver='svd', shrinkage=None, priors=None, n_components=None, store_covariance=False, tol=0.0001, covariance_estimator=None)#
-

OnnxOperatorMixin for LinearDiscriminantAnalysis

-
- -
-
-
-
-

OnnxSklearnLinearRegression#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLinearRegression(*, fit_intercept=True, normalize='deprecated', copy_X=True, n_jobs=None, positive=False)#
-

OnnxOperatorMixin for LinearRegression

-
- -
-
-
-
-

OnnxSklearnLinearSVC#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLinearSVC(penalty='l2', loss='squared_hinge', *, dual=True, tol=0.0001, C=1.0, multi_class='ovr', fit_intercept=True, intercept_scaling=1, class_weight=None, verbose=0, random_state=None, max_iter=1000)#
-

OnnxOperatorMixin for LinearSVC

-
- -
-
-
-
-

OnnxSklearnLinearSVR#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLinearSVR(*, epsilon=0.0, tol=0.0001, C=1.0, loss='epsilon_insensitive', fit_intercept=True, intercept_scaling=1.0, dual=True, verbose=0, random_state=None, max_iter=1000)#
-

OnnxOperatorMixin for LinearSVR

-
- -
-
-
-
-

OnnxSklearnLocalOutlierFactor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLocalOutlierFactor(n_neighbors=20, *, algorithm='auto', leaf_size=30, metric='minkowski', p=2, metric_params=None, contamination='auto', novelty=False, n_jobs=None)#
-

OnnxOperatorMixin for LocalOutlierFactor

-
- -
-
-
-
-

OnnxSklearnLogisticRegression#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLogisticRegression(penalty='l2', *, dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None, solver='lbfgs', max_iter=100, multi_class='auto', verbose=0, warm_start=False, n_jobs=None, l1_ratio=None)#
-

OnnxOperatorMixin for LogisticRegression

-
- -
-
-
-
-

OnnxSklearnLogisticRegressionCV#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnLogisticRegressionCV(*, Cs=10, fit_intercept=True, cv=None, dual=False, penalty='l2', scoring=None, solver='lbfgs', tol=0.0001, max_iter=100, class_weight=None, n_jobs=None, verbose=0, refit=True, intercept_scaling=1.0, multi_class='auto', random_state=None, l1_ratios=None)#
-

OnnxOperatorMixin for LogisticRegressionCV

-
- -
-
-
-
-

OnnxSklearnMLPClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnMLPClassifier(hidden_layer_sizes=(100,), activation='relu', *, solver='adam', alpha=0.0001, batch_size='auto', learning_rate='constant', learning_rate_init=0.001, power_t=0.5, max_iter=200, shuffle=True, random_state=None, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True, early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08, n_iter_no_change=10, max_fun=15000)#
-

OnnxOperatorMixin for MLPClassifier

-
- -
-
-
-
-

OnnxSklearnMLPRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnMLPRegressor(hidden_layer_sizes=(100,), activation='relu', *, solver='adam', alpha=0.0001, batch_size='auto', learning_rate='constant', learning_rate_init=0.001, power_t=0.5, max_iter=200, shuffle=True, random_state=None, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True, early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08, n_iter_no_change=10, max_fun=15000)#
-

OnnxOperatorMixin for MLPRegressor

-
- -
-
-
-
-

OnnxSklearnMaxAbsScaler#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnMaxAbsScaler(*, copy=True)#
-

OnnxOperatorMixin for MaxAbsScaler

-
- -
-
-
-
-

OnnxSklearnMinMaxScaler#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnMinMaxScaler(feature_range=(0, 1), *, copy=True, clip=False)#
-

OnnxOperatorMixin for MinMaxScaler

-
- -
-
-
-
-

OnnxSklearnMiniBatchKMeans#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnMiniBatchKMeans(n_clusters=8, *, init='k-means++', max_iter=100, batch_size=1024, verbose=0, compute_labels=True, random_state=None, tol=0.0, max_no_improvement=10, init_size=None, n_init=3, reassignment_ratio=0.01)#
-

OnnxOperatorMixin for MiniBatchKMeans

-
- -
-
-
-
-

OnnxSklearnMultiOutputClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiOutputClassifier(estimator, *, n_jobs=None)#
-

OnnxOperatorMixin for MultiOutputClassifier

-
- -
-
-
-
-

OnnxSklearnMultiOutputRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiOutputRegressor(estimator, *, n_jobs=None)#
-

OnnxOperatorMixin for MultiOutputRegressor

-
- -
-
-
-
-

OnnxSklearnMultiTaskElasticNet#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiTaskElasticNet(alpha=1.0, *, l1_ratio=0.5, fit_intercept=True, normalize='deprecated', copy_X=True, max_iter=1000, tol=0.0001, warm_start=False, random_state=None, selection='cyclic')#
-

OnnxOperatorMixin for MultiTaskElasticNet

-
- -
-
-
-
-

OnnxSklearnMultiTaskElasticNetCV#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiTaskElasticNetCV(*, l1_ratio=0.5, eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize='deprecated', max_iter=1000, tol=0.0001, cv=None, copy_X=True, verbose=0, n_jobs=None, random_state=None, selection='cyclic')#
-

OnnxOperatorMixin for MultiTaskElasticNetCV

-
- -
-
-
-
-

OnnxSklearnMultiTaskLasso#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiTaskLasso(alpha=1.0, *, fit_intercept=True, normalize='deprecated', copy_X=True, max_iter=1000, tol=0.0001, warm_start=False, random_state=None, selection='cyclic')#
-

OnnxOperatorMixin for MultiTaskLasso

-
- -
-
-
-
-

OnnxSklearnMultiTaskLassoCV#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiTaskLassoCV(*, eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize='deprecated', max_iter=1000, tol=0.0001, copy_X=True, cv=None, verbose=False, n_jobs=None, random_state=None, selection='cyclic')#
-

OnnxOperatorMixin for MultiTaskLassoCV

-
- -
-
-
-
-

OnnxSklearnMultinomialNB#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultinomialNB(*, alpha=1.0, fit_prior=True, class_prior=None)#
-

OnnxOperatorMixin for MultinomialNB

-
- -
-
-
-
-

OnnxSklearnNearestNeighbors#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnNearestNeighbors(*, n_neighbors=5, radius=1.0, algorithm='auto', leaf_size=30, metric='minkowski', p=2, metric_params=None, n_jobs=None)#
-

OnnxOperatorMixin for NearestNeighbors

-
- -
-
-
-
-

OnnxSklearnNeighborhoodComponentsAnalysis#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnNeighborhoodComponentsAnalysis(n_components=None, *, init='auto', warm_start=False, max_iter=50, tol=1e-05, callback=None, verbose=0, random_state=None)#
-

OnnxOperatorMixin for NeighborhoodComponentsAnalysis

-
- -
-
-
-
-

OnnxSklearnNormalizer#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnNormalizer(norm='l2', *, copy=True)#
-

OnnxOperatorMixin for Normalizer

-
- -
-
-
-
-

OnnxSklearnNuSVC#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnNuSVC(*, nu=0.5, kernel='rbf', degree=3, gamma='scale', coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=- 1, decision_function_shape='ovr', break_ties=False, random_state=None)#
-

OnnxOperatorMixin for NuSVC

-
- -
-
-
-
-

OnnxSklearnNuSVR#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnNuSVR(*, nu=0.5, C=1.0, kernel='rbf', degree=3, gamma='scale', coef0=0.0, shrinking=True, tol=0.001, cache_size=200, verbose=False, max_iter=- 1)#
-

OnnxOperatorMixin for NuSVR

-
- -
-
-
-
-

OnnxSklearnOneClassSVM#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnOneClassSVM(*, kernel='rbf', degree=3, gamma='scale', coef0=0.0, tol=0.001, nu=0.5, shrinking=True, cache_size=200, verbose=False, max_iter=- 1)#
-

OnnxOperatorMixin for OneClassSVM

-
- -
-
-
-
-

OnnxSklearnOneHotEncoder#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnOneHotEncoder(*, categories='auto', drop=None, sparse=True, dtype=<class 'numpy.float64'>, handle_unknown='error')#
-

OnnxOperatorMixin for OneHotEncoder

-
- -
-
-
-
-

OnnxSklearnOneVsRestClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnOneVsRestClassifier(estimator, *, n_jobs=None)#
-

OnnxOperatorMixin for OneVsRestClassifier

-
- -
-
-
-
-

OnnxSklearnOrdinalEncoder#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnOrdinalEncoder(*, categories='auto', dtype=<class 'numpy.float64'>, handle_unknown='error', unknown_value=None)#
-

OnnxOperatorMixin for OrdinalEncoder

-
- -
-
-
-
-

OnnxSklearnOrthogonalMatchingPursuit#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnOrthogonalMatchingPursuit(*, n_nonzero_coefs=None, tol=None, fit_intercept=True, normalize='deprecated', precompute='auto')#
-

OnnxOperatorMixin for OrthogonalMatchingPursuit

-
- -
-
-
-
-

OnnxSklearnOrthogonalMatchingPursuitCV#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnOrthogonalMatchingPursuitCV(*, copy=True, fit_intercept=True, normalize='deprecated', max_iter=None, cv=None, n_jobs=None, verbose=False)#
-

OnnxOperatorMixin for OrthogonalMatchingPursuitCV

-
- -
-
-
-
-

OnnxSklearnPCA#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnPCA(n_components=None, *, copy=True, whiten=False, svd_solver='auto', tol=0.0, iterated_power='auto', random_state=None)#
-

OnnxOperatorMixin for PCA

-
- -
-
-
-
-

OnnxSklearnPLSRegression#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnPLSRegression(n_components=2, *, scale=True, max_iter=500, tol=1e-06, copy=True)#
-

OnnxOperatorMixin for PLSRegression

-
- -
-
-
-
-

OnnxSklearnPassiveAggressiveClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnPassiveAggressiveClassifier(*, C=1.0, fit_intercept=True, max_iter=1000, tol=0.001, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, shuffle=True, verbose=0, loss='hinge', n_jobs=None, random_state=None, warm_start=False, class_weight=None, average=False)#
-

OnnxOperatorMixin for PassiveAggressiveClassifier

-
- -
-
-
-
-

OnnxSklearnPassiveAggressiveRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnPassiveAggressiveRegressor(*, C=1.0, fit_intercept=True, max_iter=1000, tol=0.001, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, shuffle=True, verbose=0, loss='epsilon_insensitive', epsilon=0.1, random_state=None, warm_start=False, average=False)#
-

OnnxOperatorMixin for PassiveAggressiveRegressor

-
- -
-
-
-
-

OnnxSklearnPerceptron#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnPerceptron(*, penalty=None, alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=0.001, shuffle=True, verbose=0, eta0=1.0, n_jobs=None, random_state=0, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, class_weight=None, warm_start=False)#
-

OnnxOperatorMixin for Perceptron

-
- -
-
-
-
-

OnnxSklearnPipeline#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnPipeline(steps, *, memory=None, verbose=False)[source]#
-

OnnxOperatorMixin for Pipeline

-
- -
-
-
-
-

OnnxSklearnPoissonRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnPoissonRegressor(*, alpha=1.0, fit_intercept=True, max_iter=100, tol=0.0001, warm_start=False, verbose=0)#
-

OnnxOperatorMixin for PoissonRegressor

-
- -
-
-
-
-

OnnxSklearnPolynomialFeatures#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnPolynomialFeatures(degree=2, *, interaction_only=False, include_bias=True, order='C')#
-

OnnxOperatorMixin for PolynomialFeatures

-
- -
-
-
-
-

OnnxSklearnPowerTransformer#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnPowerTransformer(method='yeo-johnson', *, standardize=True, copy=True)#
-

OnnxOperatorMixin for PowerTransformer

-
- -
-
-
-
-

OnnxSklearnQuantileRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnQuantileRegressor(*, quantile=0.5, alpha=1.0, fit_intercept=True, solver='interior-point', solver_options=None)#
-

OnnxOperatorMixin for QuantileRegressor

-
- -
-
-
-
-

OnnxSklearnRANSACRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRANSACRegressor(base_estimator=None, *, min_samples=None, residual_threshold=None, is_data_valid=None, is_model_valid=None, max_trials=100, max_skips=inf, stop_n_inliers=inf, stop_score=inf, stop_probability=0.99, loss='absolute_error', random_state=None)#
-

OnnxOperatorMixin for RANSACRegressor

-
- -
-
-
-
-

OnnxSklearnRFE#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRFE(estimator, *, n_features_to_select=None, step=1, verbose=0, importance_getter='auto')#
-

OnnxOperatorMixin for RFE

-
- -
-
-
-
-

OnnxSklearnRFECV#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRFECV(estimator, *, step=1, min_features_to_select=1, cv=None, scoring=None, verbose=0, n_jobs=None, importance_getter='auto')#
-

OnnxOperatorMixin for RFECV

-
- -
-
-
-
-

OnnxSklearnRadiusNeighborsClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRadiusNeighborsClassifier(radius=1.0, *, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', outlier_label=None, metric_params=None, n_jobs=None, **kwargs)#
-

OnnxOperatorMixin for RadiusNeighborsClassifier

-
- -
-
-
-
-

OnnxSklearnRadiusNeighborsRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRadiusNeighborsRegressor(radius=1.0, *, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None)#
-

OnnxOperatorMixin for RadiusNeighborsRegressor

-
- -
-
-
-
-

OnnxSklearnRandomForestClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRandomForestClassifier(n_estimators=100, *, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, class_weight=None, ccp_alpha=0.0, max_samples=None)#
-

OnnxOperatorMixin for RandomForestClassifier

-
- -
-
-
-
-

OnnxSklearnRandomForestRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRandomForestRegressor(n_estimators=100, *, criterion='squared_error', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, ccp_alpha=0.0, max_samples=None)#
-

OnnxOperatorMixin for RandomForestRegressor

-
- -
-
-
-
-

OnnxSklearnRandomTreesEmbedding#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRandomTreesEmbedding(n_estimators=100, *, max_depth=5, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_leaf_nodes=None, min_impurity_decrease=0.0, sparse_output=True, n_jobs=None, random_state=None, verbose=0, warm_start=False)#
-

OnnxOperatorMixin for RandomTreesEmbedding

-
- -
-
-
-
-

OnnxSklearnRidge#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRidge(alpha=1.0, *, fit_intercept=True, normalize='deprecated', copy_X=True, max_iter=None, tol=0.001, solver='auto', positive=False, random_state=None)#
-

OnnxOperatorMixin for Ridge

-
- -
-
-
-
-

OnnxSklearnRidgeCV#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRidgeCV(alphas=(0.1, 1.0, 10.0), *, fit_intercept=True, normalize='deprecated', scoring=None, cv=None, gcv_mode=None, store_cv_values=False, alpha_per_target=False)#
-

OnnxOperatorMixin for RidgeCV

-
- -
-
-
-
-

OnnxSklearnRidgeClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRidgeClassifier(alpha=1.0, *, fit_intercept=True, normalize='deprecated', copy_X=True, max_iter=None, tol=0.001, class_weight=None, solver='auto', positive=False, random_state=None)#
-

OnnxOperatorMixin for RidgeClassifier

-
- -
-
-
-
-

OnnxSklearnRidgeClassifierCV#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRidgeClassifierCV(alphas=(0.1, 1.0, 10.0), *, fit_intercept=True, normalize='deprecated', scoring=None, cv=None, class_weight=None, store_cv_values=False)#
-

OnnxOperatorMixin for RidgeClassifierCV

-
- -
-
-
-
-

OnnxSklearnRobustScaler#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnRobustScaler(*, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True, unit_variance=False)#
-

OnnxOperatorMixin for RobustScaler

-
- -
-
-
-
-

OnnxSklearnSGDClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnSGDClassifier(loss='hinge', *, penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=0.001, shuffle=True, verbose=0, epsilon=0.1, n_jobs=None, random_state=None, learning_rate='optimal', eta0=0.0, power_t=0.5, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, class_weight=None, warm_start=False, average=False)#
-

OnnxOperatorMixin for SGDClassifier

-
- -
-
-
-
-

OnnxSklearnSGDRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnSGDRegressor(loss='squared_error', *, penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=0.001, shuffle=True, verbose=0, epsilon=0.1, random_state=None, learning_rate='invscaling', eta0=0.01, power_t=0.25, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=False, average=False)#
-

OnnxOperatorMixin for SGDRegressor

-
- -
-
-
-
-

OnnxSklearnSVC#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnSVC(*, C=1.0, kernel='rbf', degree=3, gamma='scale', coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=- 1, decision_function_shape='ovr', break_ties=False, random_state=None)#
-

OnnxOperatorMixin for SVC

-
- -
-
-
-
-

OnnxSklearnSVR#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnSVR(*, kernel='rbf', degree=3, gamma='scale', coef0=0.0, tol=0.001, C=1.0, epsilon=0.1, shrinking=True, cache_size=200, verbose=False, max_iter=- 1)#
-

OnnxOperatorMixin for SVR

-
- -
-
-
-
-

OnnxSklearnSelectFdr#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectFdr(score_func=<function f_classif>, *, alpha=0.05)#
-

OnnxOperatorMixin for SelectFdr

-
- -
-
-
-
-

OnnxSklearnSelectFpr#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectFpr(score_func=<function f_classif>, *, alpha=0.05)#
-

OnnxOperatorMixin for SelectFpr

-
- -
-
-
-
-

OnnxSklearnSelectFromModel#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectFromModel(estimator, *, threshold=None, prefit=False, norm_order=1, max_features=None, importance_getter='auto')#
-

OnnxOperatorMixin for SelectFromModel

-
- -
-
-
-
-

OnnxSklearnSelectFwe#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectFwe(score_func=<function f_classif>, *, alpha=0.05)#
-

OnnxOperatorMixin for SelectFwe

-
- -
-
-
-
-

OnnxSklearnSelectKBest#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectKBest(score_func=<function f_classif>, *, k=10)#
-

OnnxOperatorMixin for SelectKBest

-
- -
-
-
-
-

OnnxSklearnSelectPercentile#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectPercentile(score_func=<function f_classif>, *, percentile=10)#
-

OnnxOperatorMixin for SelectPercentile

-
- -
-
-
-
-

OnnxSklearnSimpleImputer#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnSimpleImputer(*, missing_values=nan, strategy='mean', fill_value=None, verbose=0, copy=True, add_indicator=False)#
-

OnnxOperatorMixin for SimpleImputer

-
- -
-
-
-
-

OnnxSklearnStackingClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnStackingClassifier(estimators, final_estimator=None, *, cv=None, stack_method='auto', n_jobs=None, passthrough=False, verbose=0)#
-

OnnxOperatorMixin for StackingClassifier

-
- -
-
-
-
-

OnnxSklearnStackingRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnStackingRegressor(estimators, final_estimator=None, *, cv=None, n_jobs=None, passthrough=False, verbose=0)#
-

OnnxOperatorMixin for StackingRegressor

-
- -
-
-
-
-

OnnxSklearnStandardScaler#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnStandardScaler(*, copy=True, with_mean=True, with_std=True)#
-

OnnxOperatorMixin for StandardScaler

-
- -
-
-
-
-

OnnxSklearnTfidfTransformer#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnTfidfTransformer(*, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False)#
-

OnnxOperatorMixin for TfidfTransformer

-
- -
-
-
-
-

OnnxSklearnTfidfVectorizer#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnTfidfVectorizer(*, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, analyzer='word', stop_words=None, token_pattern='(?u)\\b\\w\\w+\\b', ngram_range=(1, 1), max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=<class 'numpy.float64'>, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False)#
-

OnnxOperatorMixin for TfidfVectorizer

-
- -
-
-
-
-

OnnxSklearnTheilSenRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnTheilSenRegressor(*, fit_intercept=True, copy_X=True, max_subpopulation=10000.0, n_subsamples=None, max_iter=300, tol=0.001, random_state=None, n_jobs=None, verbose=False)#
-

OnnxOperatorMixin for TheilSenRegressor

-
- -
-
-
-
-

OnnxSklearnTruncatedSVD#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnTruncatedSVD(n_components=2, *, algorithm='randomized', n_iter=5, random_state=None, tol=0.0)#
-

OnnxOperatorMixin for TruncatedSVD

-
- -
-
-
-
-

OnnxSklearnTweedieRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnTweedieRegressor(*, power=0.0, alpha=1.0, fit_intercept=True, link='auto', max_iter=100, tol=0.0001, warm_start=False, verbose=0)#
-

OnnxOperatorMixin for TweedieRegressor

-
- -
-
-
-
-

OnnxSklearnVarianceThreshold#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnVarianceThreshold(threshold=0.0)#
-

OnnxOperatorMixin for VarianceThreshold

-
- -
-
-
-
-

OnnxSklearnVotingClassifier#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnVotingClassifier(estimators, *, voting='hard', weights=None, n_jobs=None, flatten_transform=True, verbose=False)#
-

OnnxOperatorMixin for VotingClassifier

-
- -
-
-
-
-

OnnxSklearnVotingRegressor#

-
-
-class skl2onnx.algebra.sklearn_ops.OnnxSklearnVotingRegressor(estimators, *, weights=None, n_jobs=None, verbose=False)#
-

OnnxOperatorMixin for VotingRegressor

-
- -
-
-
-
-

OnnxSklearnXGBClassifier#

OnnxSklearnXGBRegressor#

OnnxTransferTransformer#

OnnxValidatorClassifier#

OnnxWOEEncoder#

OnnxWOETransformer#

OnnxWrappedLightGbmBooster#

OnnxWrappedLightGbmBoosterClassifier#

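Each OnnxSklearn* class above derives from both the corresponding scikit-learn estimator and OnnxOperatorMixin, so an instance can be fit exactly like the original estimator and then exported through the to_onnx method documented below. A minimal sketch, assuming scikit-learn and skl2onnx are installed (the data and hyper-parameters are only illustrative):

import numpy as np
from skl2onnx.algebra.sklearn_ops import OnnxSklearnLogisticRegression

# Train the wrapped estimator exactly like sklearn.linear_model.LogisticRegression.
X = np.random.rand(30, 4).astype(np.float32)
y = (X[:, 0] > 0.5).astype(np.int64)
model = OnnxSklearnLogisticRegression(max_iter=200).fit(X, y)

# to_onnx comes from OnnxOperatorMixin; X[:1] is only used to guess the input type.
onx = model.to_onnx(X[:1])
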
Pipeline#

class skl2onnx.algebra.sklearn_ops.OnnxSklearnPipeline(steps, *, memory=None, verbose=False)[source]#

OnnxOperatorMixin for Pipeline

onnx_converter()#

Returns a converter for this model. If not overloaded, it fetches the converter mapped to the first scikit-learn parent it can find.

onnx_parser()#

Returns a parser for this model. If not overloaded, it calls the converter to guess the number of outputs. If it still fails, it fetches the parser mapped to the first scikit-learn parent it can find.

onnx_shape_calculator()#

Returns a shape calculator for this model. If not overloaded, it fetches the shape calculator mapped to the first scikit-learn parent it can find.

to_onnx(X=None, name=None, options=None, white_op=None, black_op=None, final_types=None, target_opset=None, verbose=0)#

Converts the model into ONNX format. It calls the method _to_onnx, which must be overloaded.

Parameters

  • X – training data, at least one sample; it is used to guess the type of the input data.

  • name – name of the model; if None, it is replaced by the class name.

  • options – specific options given to converters (see Converters with options).

  • white_op – white list of ONNX nodes allowed while converting a pipeline; if empty, all are allowed.

  • black_op – black list of ONNX nodes disallowed while converting a pipeline; if empty, none are blacklisted.

  • final_types – a Python list. Works the same way as initial_types but is not mandatory; it is used to overwrite the type (if the type is not None) and the name of every output.

  • target_opset – overwrites self.op_version.

  • verbose – displays information while converting.

to_onnx_operator(inputs=None, outputs=None, target_opset=None, options=None)#

This function must be overloaded.
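
A minimal sketch of a conversion call (assuming a fitted pipeline; the top-level helper skl2onnx.to_onnx mirrors the X, options, final_types and target_opset parameters described above):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from skl2onnx import to_onnx

X = np.random.rand(20, 3).astype(np.float32)
y = (X.sum(axis=1) > 1.5).astype(np.int64)
pipe = Pipeline([("scale", StandardScaler()), ("clf", LogisticRegression())]).fit(X, y)

# X[:1] is only used to guess the type and shape of the input data.
onx = to_onnx(pipe, X[:1], target_opset=15)
with open("pipeline.onnx", "wb") as f:
    f.write(onx.SerializeToString())
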
class skl2onnx.algebra.sklearn_ops.OnnxSklearnColumnTransformer(transformers, *, remainder='drop', sparse_threshold=0.3, n_jobs=None, transformer_weights=None, verbose=False, verbose_feature_names_out=True)[source]#

OnnxOperatorMixin for ColumnTransformer

onnx_converter()#

Returns a converter for this model. If not overloaded, it fetches the converter mapped to the first scikit-learn parent it can find.

onnx_parser()#

Returns a parser for this model. If not overloaded, it calls the converter to guess the number of outputs. If it still fails, it fetches the parser mapped to the first scikit-learn parent it can find.

onnx_shape_calculator()#

Returns a shape calculator for this model. If not overloaded, it fetches the shape calculator mapped to the first scikit-learn parent it can find.

to_onnx(X=None, name=None, options=None, white_op=None, black_op=None, final_types=None, target_opset=None, verbose=0)#

Converts the model into ONNX format. It calls the method _to_onnx, which must be overloaded.

Parameters

  • X – training data, at least one sample; it is used to guess the type of the input data.

  • name – name of the model; if None, it is replaced by the class name.

  • options – specific options given to converters (see Converters with options).

  • white_op – white list of ONNX nodes allowed while converting a pipeline; if empty, all are allowed.

  • black_op – black list of ONNX nodes disallowed while converting a pipeline; if empty, none are blacklisted.

  • final_types – a Python list. Works the same way as initial_types but is not mandatory; it is used to overwrite the type (if the type is not None) and the name of every output.

  • target_opset – overwrites self.op_version.

  • verbose – displays information while converting.

to_onnx_operator(inputs=None, outputs=None, target_opset=None, options=None)#

This function must be overloaded.

class skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureUnion(transformer_list, *, n_jobs=None, transformer_weights=None, verbose=False)[source]#

OnnxOperatorMixin for FeatureUnion

onnx_converter()#

Returns a converter for this model. If not overloaded, it fetches the converter mapped to the first scikit-learn parent it can find.

onnx_parser()#

Returns a parser for this model. If not overloaded, it calls the converter to guess the number of outputs. If it still fails, it fetches the parser mapped to the first scikit-learn parent it can find.

onnx_shape_calculator()#

Returns a shape calculator for this model. If not overloaded, it fetches the shape calculator mapped to the first scikit-learn parent it can find.

to_onnx(X=None, name=None, options=None, white_op=None, black_op=None, final_types=None, target_opset=None, verbose=0)#

Converts the model into ONNX format. It calls the method _to_onnx, which must be overloaded.

Parameters

  • X – training data, at least one sample; it is used to guess the type of the input data.

  • name – name of the model; if None, it is replaced by the class name.

  • options – specific options given to converters (see Converters with options).

  • white_op – white list of ONNX nodes allowed while converting a pipeline; if empty, all are allowed.

  • black_op – black list of ONNX nodes disallowed while converting a pipeline; if empty, none are blacklisted.

  • final_types – a Python list. Works the same way as initial_types but is not mandatory; it is used to overwrite the type (if the type is not None) and the name of every output.

  • target_opset – overwrites self.op_version.

  • verbose – displays information while converting.

to_onnx_operator(inputs=None, outputs=None, target_opset=None, options=None)#

This function must be overloaded.

Available ONNX operators#


skl2onnx maps every ONNX operator to a class that is easy to insert into a graph. These operators are added dynamically, and the list depends on the installed ONNX package. The documentation for these operators can be found on GitHub: ONNX Operators.md and ONNX-ML Operators. Combined with onnxruntime, this mapping makes it easy to check the output of the ONNX operators on any data, as shown in the example Play with ONNX operators.

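A minimal sketch of how these operator classes can be composed into a small graph and evaluated with onnxruntime (the opset value and the input data are only illustrative):

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxAbs, OnnxAdd
from onnxruntime import InferenceSession

op_version = 15  # assumed target opset
# Build Y = Abs(X + [-2]) as a tiny ONNX graph.
node = OnnxAbs(
    OnnxAdd('X', np.array([-2.0], dtype=np.float32), op_version=op_version),
    op_version=op_version, output_names=['Y'])

x = np.array([[1.0, -3.0]], dtype=np.float32)
onx = node.to_onnx({'X': x})

sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
print(sess.run(None, {'X': x}))  # [[1., 5.]]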

OnnxAbs

OnnxGreaterOrEqual_16

OnnxReduceL2_1

OnnxAbs_1

OnnxGreater_1

OnnxReduceL2_11

OnnxAbs_13

OnnxGreater_13

OnnxReduceL2_13

OnnxAbs_6

OnnxGreater_7

OnnxReduceLogSum

OnnxAcos

OnnxGreater_9

OnnxReduceLogSumExp

OnnxAcos_7

OnnxGridSample

OnnxReduceLogSumExp_1

OnnxAcosh

OnnxGridSample_16

OnnxReduceLogSumExp_11

OnnxAcosh_9

OnnxHardSigmoid

OnnxReduceLogSumExp_13

OnnxAdagrad

OnnxHardSigmoid_1

OnnxReduceLogSum_1

OnnxAdagrad_1

OnnxHardSigmoid_6

OnnxReduceLogSum_11

OnnxAdam

OnnxHardSwish

OnnxReduceLogSum_13

OnnxAdam_1

OnnxHardSwish_14

OnnxReduceMax

OnnxAdd

OnnxHardmax

OnnxReduceMax_1

OnnxAdd_1

OnnxHardmax_1

OnnxReduceMax_11

OnnxAdd_13

OnnxHardmax_11

OnnxReduceMax_12

OnnxAdd_14

OnnxHardmax_13

OnnxReduceMax_13

OnnxAdd_6

OnnxIdentity

OnnxReduceMean

OnnxAdd_7

OnnxIdentity_1

OnnxReduceMean_1

OnnxAnd

OnnxIdentity_13

OnnxReduceMean_11

OnnxAnd_1

OnnxIdentity_14

OnnxReduceMean_13

OnnxAnd_7

OnnxIdentity_16

OnnxReduceMin

OnnxArgMax

OnnxIf

OnnxReduceMin_1

OnnxArgMax_1

OnnxIf_1

OnnxReduceMin_11

OnnxArgMax_11

OnnxIf_11

OnnxReduceMin_12

OnnxArgMax_12

OnnxIf_13

OnnxReduceMin_13

OnnxArgMax_13

OnnxIf_16

OnnxReduceProd

OnnxArgMin

OnnxImputer

OnnxReduceProd_1

OnnxArgMin_1

OnnxImputer_1

OnnxReduceProd_11

OnnxArgMin_11

OnnxInstanceNormalization

OnnxReduceProd_13

OnnxArgMin_12

OnnxInstanceNormalization_1

OnnxReduceSum

OnnxArgMin_13

OnnxInstanceNormalization_6

OnnxReduceSumSquare

OnnxArrayFeatureExtractor

OnnxIsInf

OnnxReduceSumSquare_1

OnnxArrayFeatureExtractor_1

OnnxIsInf_10

OnnxReduceSumSquare_11

OnnxAsin

OnnxIsNaN

OnnxReduceSumSquare_13

OnnxAsin_7

OnnxIsNaN_13

OnnxReduceSum_1

OnnxAsinh

OnnxIsNaN_9

OnnxReduceSum_11

OnnxAsinh_9

OnnxLRN

OnnxReduceSum_13

OnnxAtan

OnnxLRN_1

OnnxRelu

OnnxAtan_7

OnnxLRN_13

OnnxRelu_1

OnnxAtanh

OnnxLSTM

OnnxRelu_13

OnnxAtanh_9

OnnxLSTM_1

OnnxRelu_14

OnnxAveragePool

OnnxLSTM_14

OnnxRelu_6

OnnxAveragePool_1

OnnxLSTM_7

OnnxReshape

OnnxAveragePool_10

OnnxLabelEncoder

OnnxReshape_1

OnnxAveragePool_11

OnnxLabelEncoder_1

OnnxReshape_13

OnnxAveragePool_7

OnnxLabelEncoder_2

OnnxReshape_14

OnnxBatchNormalization

OnnxLeakyRelu

OnnxReshape_5

OnnxBatchNormalization_1

OnnxLeakyRelu_1

OnnxResize

OnnxBatchNormalization_14

OnnxLeakyRelu_16

OnnxResize_10

OnnxBatchNormalization_15

OnnxLeakyRelu_6

OnnxResize_11

OnnxBatchNormalization_6

OnnxLess

OnnxResize_13

OnnxBatchNormalization_7

OnnxLessOrEqual

OnnxReverseSequence

OnnxBatchNormalization_9

OnnxLessOrEqual_12

OnnxReverseSequence_10

OnnxBernoulli

OnnxLessOrEqual_16

OnnxRoiAlign

OnnxBernoulli_15

OnnxLess_1

OnnxRoiAlign_10

OnnxBinarizer

OnnxLess_13

OnnxRoiAlign_16

OnnxBinarizer_1

OnnxLess_7

OnnxRound

OnnxBitShift

OnnxLess_9

OnnxRound_11

OnnxBitShift_11

OnnxLinearClassifier

OnnxSVMClassifier

OnnxCast

OnnxLinearClassifier_1

OnnxSVMClassifier_1

OnnxCastLike

OnnxLinearRegressor

OnnxSVMRegressor

OnnxCastLike_15

OnnxLinearRegressor_1

OnnxSVMRegressor_1

OnnxCastMap

OnnxLog

OnnxScaler

OnnxCastMap_1

OnnxLogSoftmax

OnnxScaler_1

OnnxCast_1

OnnxLogSoftmax_1

OnnxScan

OnnxCast_13

OnnxLogSoftmax_11

OnnxScan_11

OnnxCast_6

OnnxLogSoftmax_13

OnnxScan_16

OnnxCast_9

OnnxLog_1

OnnxScan_8

OnnxCategoryMapper

OnnxLog_13

OnnxScan_9

OnnxCategoryMapper_1

OnnxLog_6

OnnxScatter

OnnxCeil

OnnxLoop

OnnxScatterElements

OnnxCeil_1

OnnxLoop_1

OnnxScatterElements_11

OnnxCeil_13

OnnxLoop_11

OnnxScatterElements_13

OnnxCeil_6

OnnxLoop_13

OnnxScatterElements_16

OnnxCelu

OnnxLoop_16

OnnxScatterND

OnnxCelu_12

OnnxLpNormalization

OnnxScatterND_11

OnnxClip

OnnxLpNormalization_1

OnnxScatterND_13

OnnxClip_1

OnnxLpPool

OnnxScatterND_16

OnnxClip_11

OnnxLpPool_1

OnnxScatter_11

OnnxClip_12

OnnxLpPool_11

OnnxScatter_9

OnnxClip_13

OnnxLpPool_2

OnnxSelu

OnnxClip_6

OnnxMatMul

OnnxSelu_1

OnnxCompress

OnnxMatMulInteger

OnnxSelu_6

OnnxCompress_11

OnnxMatMulInteger_10

OnnxSequenceAt

OnnxCompress_9

OnnxMatMul_1

OnnxSequenceAt_11

OnnxConcat

OnnxMatMul_13

OnnxSequenceConstruct

OnnxConcatFromSequence

OnnxMatMul_9

OnnxSequenceConstruct_11

OnnxConcatFromSequence_11

OnnxMax

OnnxSequenceEmpty

OnnxConcat_1

OnnxMaxPool

OnnxSequenceEmpty_11

OnnxConcat_11

OnnxMaxPool_1

OnnxSequenceErase

OnnxConcat_13

OnnxMaxPool_10

OnnxSequenceErase_11

OnnxConcat_4

OnnxMaxPool_11

OnnxSequenceInsert

OnnxConstant

OnnxMaxPool_12

OnnxSequenceInsert_11

OnnxConstantOfShape

OnnxMaxPool_8

OnnxSequenceLength

OnnxConstantOfShape_9

OnnxMaxRoiPool

OnnxSequenceLength_11

OnnxConstant_1

OnnxMaxRoiPool_1

OnnxShape

OnnxConstant_11

OnnxMaxUnpool

OnnxShape_1

OnnxConstant_12

OnnxMaxUnpool_11

OnnxShape_13

OnnxConstant_13

OnnxMaxUnpool_9

OnnxShape_15

OnnxConstant_9

OnnxMax_1

OnnxShrink

OnnxConv

OnnxMax_12

OnnxShrink_9

OnnxConvInteger

OnnxMax_13

OnnxSigmoid

OnnxConvInteger_10

OnnxMax_6

OnnxSigmoid_1

OnnxConvTranspose

OnnxMax_8

OnnxSigmoid_13

OnnxConvTranspose_1

OnnxMean

OnnxSigmoid_6

OnnxConvTranspose_11

OnnxMeanVarianceNormalization

OnnxSign

OnnxConv_1

OnnxMeanVarianceNormalization_13

OnnxSign_13

OnnxConv_11

OnnxMeanVarianceNormalization_9

OnnxSign_9

OnnxCos

OnnxMean_1

OnnxSin

OnnxCos_7

OnnxMean_13

OnnxSin_7

OnnxCosh

OnnxMean_6

OnnxSinh

OnnxCosh_9

OnnxMean_8

OnnxSinh_9

OnnxCumSum

OnnxMin

OnnxSize

OnnxCumSum_11

OnnxMin_1

OnnxSize_1

OnnxCumSum_14

OnnxMin_12

OnnxSize_13

OnnxDepthToSpace

OnnxMin_13

OnnxSlice

OnnxDepthToSpace_1

OnnxMin_6

OnnxSlice_1

OnnxDepthToSpace_11

OnnxMin_8

OnnxSlice_10

OnnxDepthToSpace_13

OnnxMod

OnnxSlice_11

OnnxDequantizeLinear

OnnxMod_10

OnnxSlice_13

OnnxDequantizeLinear_10

OnnxMod_13

OnnxSoftmax

OnnxDequantizeLinear_13

OnnxMomentum

OnnxSoftmaxCrossEntropyLoss

OnnxDet

OnnxMomentum_1

OnnxSoftmaxCrossEntropyLoss_12

OnnxDet_11

OnnxMul

OnnxSoftmaxCrossEntropyLoss_13

OnnxDictVectorizer

OnnxMul_1

OnnxSoftmax_1

OnnxDictVectorizer_1

OnnxMul_13

OnnxSoftmax_11

OnnxDiv

OnnxMul_14

OnnxSoftmax_13

OnnxDiv_1

OnnxMul_6

OnnxSoftplus

OnnxDiv_13

OnnxMul_7

OnnxSoftplus_1

OnnxDiv_14

OnnxMultinomial

OnnxSoftsign

OnnxDiv_6

OnnxMultinomial_7

OnnxSoftsign_1

OnnxDiv_7

OnnxNeg

OnnxSpaceToDepth

OnnxDropout

OnnxNeg_1

OnnxSpaceToDepth_1

OnnxDropout_1

OnnxNeg_13

OnnxSpaceToDepth_13

OnnxDropout_10

OnnxNeg_6

OnnxSplit

OnnxDropout_12

OnnxNegativeLogLikelihoodLoss

OnnxSplitToSequence

OnnxDropout_13

OnnxNegativeLogLikelihoodLoss_12

OnnxSplitToSequence_11

OnnxDropout_6

OnnxNegativeLogLikelihoodLoss_13

OnnxSplit_1

OnnxDropout_7

OnnxNonMaxSuppression

OnnxSplit_11

OnnxDynamicQuantizeLinear

OnnxNonMaxSuppression_10

OnnxSplit_13

OnnxDynamicQuantizeLinear_11

OnnxNonMaxSuppression_11

OnnxSplit_2

OnnxEinsum

OnnxNonZero

OnnxSqrt

OnnxEinsum_12

OnnxNonZero_13

OnnxSqrt_1

OnnxElu

OnnxNonZero_9

OnnxSqrt_13

OnnxElu_1

OnnxNormalizer

OnnxSqrt_6

OnnxElu_6

OnnxNormalizer_1

OnnxSqueeze

OnnxEqual

OnnxNot

OnnxSqueeze_1

OnnxEqual_1

OnnxNot_1

OnnxSqueeze_11

OnnxEqual_11

OnnxOneHot

OnnxSqueeze_13

OnnxEqual_13

OnnxOneHotEncoder

OnnxStringNormalizer

OnnxEqual_7

OnnxOneHotEncoder_1

OnnxStringNormalizer_10

OnnxErf

OnnxOneHot_11

OnnxSub

OnnxErf_13

OnnxOneHot_9

OnnxSub_1

OnnxErf_9

OnnxOptional

OnnxSub_13

OnnxExp

OnnxOptionalGetElement

OnnxSub_14

OnnxExp_1

OnnxOptionalGetElement_15

OnnxSub_6

OnnxExp_13

OnnxOptionalHasElement

OnnxSub_7

OnnxExp_6

OnnxOptionalHasElement_15

OnnxSum

OnnxExpand

OnnxOptional_15

OnnxSum_1

OnnxExpand_13

OnnxOr

OnnxSum_13

OnnxExpand_8

OnnxOr_1

OnnxSum_6

OnnxEyeLike

OnnxOr_7

OnnxSum_8

OnnxEyeLike_9

OnnxPRelu

OnnxTan

OnnxFeatureVectorizer

OnnxPRelu_1

OnnxTan_7

OnnxFeatureVectorizer_1

OnnxPRelu_16

OnnxTanh

OnnxFlatten

OnnxPRelu_6

OnnxTanh_1

OnnxFlatten_1

OnnxPRelu_7

OnnxTanh_13

OnnxFlatten_11

OnnxPRelu_9

OnnxTanh_6

OnnxFlatten_13

OnnxPad

OnnxTfIdfVectorizer

OnnxFlatten_9

OnnxPad_1

OnnxTfIdfVectorizer_9

OnnxFloor

OnnxPad_11

OnnxThresholdedRelu

OnnxFloor_1

OnnxPad_13

OnnxThresholdedRelu_10

OnnxFloor_13

OnnxPad_2

OnnxTile

OnnxFloor_6

OnnxPow

OnnxTile_1

OnnxGRU

OnnxPow_1

OnnxTile_13

OnnxGRU_1

OnnxPow_12

OnnxTile_6

OnnxGRU_14

OnnxPow_13

OnnxTopK

OnnxGRU_3

OnnxPow_15

OnnxTopK_1

OnnxGRU_7

OnnxPow_7

OnnxTopK_10

OnnxGather

OnnxQLinearConv

OnnxTopK_11

OnnxGatherElements

OnnxQLinearConv_10

OnnxTranspose

OnnxGatherElements_11

OnnxQLinearMatMul

OnnxTranspose_1

OnnxGatherElements_13

OnnxQLinearMatMul_10

OnnxTranspose_13

OnnxGatherND

OnnxQuantizeLinear

OnnxTreeEnsembleClassifier

OnnxGatherND_11

OnnxQuantizeLinear_10

OnnxTreeEnsembleClassifier_1

OnnxGatherND_12

OnnxQuantizeLinear_13

OnnxTreeEnsembleClassifier_3

OnnxGatherND_13

OnnxRNN

OnnxTreeEnsembleRegressor

OnnxGather_1

OnnxRNN_1

OnnxTreeEnsembleRegressor_1

OnnxGather_11

OnnxRNN_14

OnnxTreeEnsembleRegressor_3

OnnxGather_13

OnnxRNN_7

OnnxTrilu

OnnxGemm

OnnxRandomNormal

OnnxTrilu_14

OnnxGemm_1

OnnxRandomNormalLike

OnnxUnique

OnnxGemm_11

OnnxRandomNormalLike_1

OnnxUnique_11

OnnxGemm_13

OnnxRandomNormal_1

OnnxUnsqueeze

OnnxGemm_6

OnnxRandomUniform

OnnxUnsqueeze_1

OnnxGemm_7

OnnxRandomUniformLike

OnnxUnsqueeze_11

OnnxGemm_9

OnnxRandomUniformLike_1

OnnxUnsqueeze_13

OnnxGlobalAveragePool

OnnxRandomUniform_1

OnnxUpsample

OnnxGlobalAveragePool_1

OnnxRange

OnnxUpsample_10

OnnxGlobalLpPool

OnnxRange_11

OnnxUpsample_7

OnnxGlobalLpPool_1

OnnxReciprocal

OnnxUpsample_9

OnnxGlobalLpPool_2

OnnxReciprocal_1

OnnxWhere

OnnxGlobalMaxPool

OnnxReciprocal_13

OnnxWhere_16

OnnxGlobalMaxPool_1

OnnxReciprocal_6

OnnxWhere_9

OnnxGradient

OnnxReduceL1

OnnxXor

OnnxGradient_1

OnnxReduceL1_1

OnnxXor_1

OnnxGreater

OnnxReduceL1_11

OnnxXor_7

OnnxGreaterOrEqual

OnnxReduceL1_13

OnnxZipMap

OnnxGreaterOrEqual_12

OnnxReduceL2

OnnxZipMap_1


OnnxAbs#

class skl2onnx.algebra.onnx_ops.OnnxAbs(*args, **kwargs)#

Version

Onnx name: Abs

This version of the operator has been available since version 13.

Summary

Absolute takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the absolute value, y = abs(x), is applied to the tensor elementwise.

Inputs

  • X (heterogeneous) T: Input tensor

Outputs

  • Y (heterogeneous) T: Output tensor

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.
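
A quick sanity check of this element-wise definition against numpy (a sketch; the opset value is only illustrative):

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxAbs
from onnxruntime import InferenceSession

x = np.array([[-1.5, 2.0, -3.0]], dtype=np.float32)
onx = OnnxAbs('X', op_version=13, output_names=['Y']).to_onnx({'X': x})

sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
y = sess.run(None, {'X': x})[0]
assert np.allclose(y, np.abs(x))  # y = abs(x), element-wise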

OnnxAbs_1#

class skl2onnx.algebra.onnx_ops.OnnxAbs_1(*args, **kwargs)#

Version

Onnx name: Abs

This version of the operator has been available since version 1.

Summary

Absolute takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the absolute value, y = abs(x), is applied to the tensor elementwise.

Attributes

Inputs

  • X (heterogeneous) T: Input tensor

Outputs

  • Y (heterogeneous) T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxAbs_13#

class skl2onnx.algebra.onnx_ops.OnnxAbs_13(*args, **kwargs)#

Version

Onnx name: Abs

This version of the operator has been available since version 13.

Summary

Absolute takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the absolute value, y = abs(x), is applied to the tensor elementwise.

Inputs

  • X (heterogeneous) T: Input tensor

Outputs

  • Y (heterogeneous) T: Output tensor

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

OnnxAbs_6#

class skl2onnx.algebra.onnx_ops.OnnxAbs_6(*args, **kwargs)#

Version

Onnx name: Abs

This version of the operator has been available since version 6.

Summary

Absolute takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the absolute value, y = abs(x), is applied to the tensor elementwise.

Inputs

  • X (heterogeneous) T: Input tensor

Outputs

  • Y (heterogeneous) T: Output tensor

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

OnnxAcos#

class skl2onnx.algebra.onnx_ops.OnnxAcos(*args, **kwargs)#

Version

Onnx name: Acos

This version of the operator has been available since version 7.

Summary

Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise.

Inputs

  • input (heterogeneous) T: Input tensor

Outputs

  • output (heterogeneous) T: The arccosine of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxAcos_7#

class skl2onnx.algebra.onnx_ops.OnnxAcos_7(*args, **kwargs)#

Version

Onnx name: Acos

This version of the operator has been available since version 7.

Summary

Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise.

Inputs

  • input (heterogeneous) T: Input tensor

Outputs

  • output (heterogeneous) T: The arccosine of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxAcosh#

class skl2onnx.algebra.onnx_ops.OnnxAcosh(*args, **kwargs)#

Version

Onnx name: Acosh

This version of the operator has been available since version 9.

Summary

Calculates the hyperbolic arccosine of the given input tensor element-wise.

Inputs

  • input (heterogeneous) T: Input tensor

Outputs

  • output (heterogeneous) T: The hyperbolic arccosine values of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxAcosh_9#

class skl2onnx.algebra.onnx_ops.OnnxAcosh_9(*args, **kwargs)#

Version

Onnx name: Acosh

This version of the operator has been available since version 9.

Summary

Calculates the hyperbolic arccosine of the given input tensor element-wise.

Inputs

  • input (heterogeneous) T: Input tensor

Outputs

  • output (heterogeneous) T: The hyperbolic arccosine values of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxAdagrad#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAdagrad(*args, **kwargs)#
-

Version

-

Onnx name: Adagrad

-

This version of the operator has been available since version 1 of domain ai.onnx.preview.training.

-

Summary

-

Compute one iteration of ADAGRAD, a stochastic gradient based optimization algorithm. This operator can conduct the optimization of multiple tensor variables.

-

Let’s define the behavior of this operator. As you can imagine, ADAGRAD requires some parameters:

-
-
    -
  • The initial learning-rate “R”.

  • -
  • The update count “T”. That is, the number of training iterations conducted.

  • -
  • A L2-norm regularization coefficient “norm_coefficient”.

  • -
  • A learning-rate decay factor “decay_factor”.

  • -
  • A small constant “epsilon” to avoid dividing-by-zero.

  • -
-
-

At each ADAGRAD iteration, the optimized tensors are moved along a direction computed based on their estimated gradient and accumulated squared gradient. Assume that only a single tensor “X” is updated by this operator. We need the value of “X”, its gradient “G”, and its accumulated squared gradient “H”. Therefore, variables in this operator’s input list are sequentially “R”, “T”, “X”, “G”, and “H”. Other parameters are given as attributes because they are usually constants. Also, the corresponding output tensors are the new value of “X” (called “X_new”), and then the new accumulated squared gradient (called “H_new”). Those outputs are computed from the given inputs following the pseudo code below.

-

Let “+”, “-”, “*”, and “/” be element-wise arithmetic operations with numpy-style broadcasting support. The pseudo code to compute those outputs is:

-
-

// Compute a scalar learning-rate factor. At the first update of X, T is generally
// 0 (0-based update index) or 1 (1-based update index).
r = R / (1 + T * decay_factor);

// Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm.
G_regularized = norm_coefficient * X + G;

// Compute new accumulated squared gradient.
H_new = H + G_regularized * G_regularized;

// Compute the adaptive part of per-coordinate learning rate. Note that Sqrt(...)
// computes element-wise square-root.
H_adaptive = Sqrt(H_new) + epsilon

// Compute the new value of "X".
X_new = X - r * G_regularized / H_adaptive;

If this operator is assigned to optimize multiple inputs, for example “X_1” and “X_2”, the same pseudo code may be extended to handle all tensors jointly. More specifically, we can view “X” as a concatenation of “X_1” and “X_2” (of course, their gradients and accumulated gradients should be concatenated too) and then just reuse the entire pseudo code.

-

Note that ADAGRAD was first proposed in http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf. In that reference paper, this operator is a special case of Figure 1’s composite mirror descent update.
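The pseudo code above maps directly onto numpy. A small sketch, not part of the specification, applying one ADAGRAD step to a single tensor with made-up values:

import numpy as np

R, T = 0.1, 0                                        # initial learning rate, update count
decay_factor, norm_coefficient, epsilon = 0.0, 0.0, 1e-6

X = np.array([1.0, -2.0, 3.0], dtype=np.float32)     # tensor being optimized
G = np.array([0.5, -0.5, 1.0], dtype=np.float32)     # its gradient
H = np.zeros_like(X)                                 # accumulated squared gradient

r = R / (1 + T * decay_factor)
G_regularized = norm_coefficient * X + G
H_new = H + G_regularized * G_regularized
H_adaptive = np.sqrt(H_new) + epsilon
X_new = X - r * G_regularized / H_adaptive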

-

Attributes

-
    -
  • decay_factor: The decay factor of learning rate after one update.The effective learning rate is computed by r = R / (1 + T * decay_factor). Default to 0 so that increasing update counts doesn’t reduce the learning rate. Default value is -name: "decay_factor" f: 0.0 type: FLOAT

  • -
  • epsilon: Small scalar to avoid dividing by zero. Default value is -name: "epsilon" f: 9.999999974752427e-07 type: FLOAT

  • -
  • norm_coefficient: Regularization coefficient in 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization. Default value is -name: "norm_coefficient" f: 0.0 type: FLOAT

  • -
-

Inputs

-

Between 3 and 2147483647 inputs.

-
    -
  • R (heterogeneous)T1: The initial learning rate.

  • -
  • T (heterogeneous)T2: The update count of “X”. It should be a scalar.

  • -
  • inputs (variadic)T3: The current values of optimized tensors, followed by their respective gradients, followed by their respective accumulated squared gradients.For example, if two tensor “X_1” and “X_2” are optimized, The input list would be [“X_1”, “X_2”, gradient of “X_1”, gradient of “X_2”, accumulated squared gradient of “X_1”, accumulated squared gradient of “X_2”].

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic)T3: Updated values of optimized tensors, followed by their updated values of accumulated squared gradients. For example, if two tensor “X_1” and “X_2” are optimized, the output list would be [new value of “X_1,” new value of “X_2” new accumulated squared gradient of “X_1”, new accumulated squared gradient of “X_2”].

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(double): Constrain input types to float scalars.

  • -
  • T2 tensor(int64): Constrain input types to 64-bit integer scalars.

  • -
  • T3 tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAdagrad_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAdagrad_1(*args, **kwargs)#
-

Version

-

Onnx name: Adagrad

-

This version of the operator has been available since -version 1 of domain ai.onnx.preview.training.

-

Summary

-

Compute one iteration of ADAGRAD, a stochastic gradient based optimization -algorithm. This operator can conduct the optimization of multiple tensor variables.

-

Let’s define the behavior of this operator. As you can imagine, ADAGRAD requires -some parameters:

-
-
    -
  • The initial learning-rate “R”.

  • -
  • The update count “T”. That is, the number of training iterations conducted.

  • -
  • A L2-norm regularization coefficient “norm_coefficient”.

  • -
  • A learning-rate decay factor “decay_factor”.

  • -
  • A small constant “epsilon” to avoid dividing-by-zero.

  • -
-
-

At each ADAGRAD iteration, the optimized tensors are moved along a direction -computed based on their estimated gradient and accumulated squared gradient. Assume -that only a single tensor “X” is updated by this operator. We need the value of “X”, -its gradient “G”, and its accumulated squared gradient “H”. Therefore, variables in -this operator’s input list are sequentially “R”, “T”, “X”, “G”, and “H”. Other -parameters are given as attributes because they are usually constants. Also, the -corresponding output tensors are the new value of “X” (called “X_new”), and then -the new accumulated squared gradient (called “H_new”). Those outputs are computed -from the given inputs following the pseudo code below.

-

Let “+”, “-”, “*”, and “/” are all element-wise arithmetic operations with -numpy-style broadcasting support. The pseudo code to compute those outputs is:

-
-

// Compute a scalar learning-rate factor. At the first update of X, T is generally -// 0 (0-based update index) or 1 (1-based update index). -r = R / (1 + T * decay_factor);

-

// Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm. -G_regularized = norm_coefficient * X + G;

-

// Compute new accumulated squared gradient. -H_new = H + G_regularized * G_regularized;

-

// Compute the adaptive part of per-coordinate learning rate. Note that Sqrt(…) -// computes element-wise square-root. -H_adaptive = Sqrt(H_new) + epsilon

-

// Compute the new value of “X”. -X_new = X - r * G_regularized / H_adaptive;

-
-

If one assign this operators to optimize multiple inputs, for example, “X_1” and “X_2”, the same -pseudo code may be extended to handle all tensors jointly. More specifically, we can view “X” as a -concatenation of “X_1” and “X_2” (of course, their gradient and accumulate gradient should -be concatenated too) and then just reuse the entire pseudo code.

-

Note that ADAGRAD was first proposed in http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf. -In that reference paper, this operator is a special case of the Figure 1’s composite mirror -descent update.

-

Attributes

-
    -
  • decay_factor: The decay factor of learning rate after one update.The effective learning rate is computed by r = R / (1 + T * decay_factor). Default to 0 so that increasing update counts doesn’t reduce the learning rate. Default value is -name: "decay_factor" f: 0.0 type: FLOAT

  • -
  • epsilon: Small scalar to avoid dividing by zero. Default value is -name: "epsilon" f: 9.999999974752427e-07 type: FLOAT

  • -
  • norm_coefficient: Regularization coefficient in 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization. Default value is -name: "norm_coefficient" f: 0.0 type: FLOAT

  • -
-

Inputs

-

Between 3 and 2147483647 inputs.

-
    -
  • R (heterogeneous)T1: The initial learning rate.

  • -
  • T (heterogeneous)T2: The update count of “X”. It should be a scalar.

  • -
  • inputs (variadic)T3: The current values of optimized tensors, followed by their respective gradients, followed by their respective accumulated squared gradients.For example, if two tensor “X_1” and “X_2” are optimized, The input list would be [“X_1”, “X_2”, gradient of “X_1”, gradient of “X_2”, accumulated squared gradient of “X_1”, accumulated squared gradient of “X_2”].

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic)T3: Updated values of optimized tensors, followed by their updated values of accumulated squared gradients. For example, if two tensor “X_1” and “X_2” are optimized, the output list would be [new value of “X_1,” new value of “X_2” new accumulated squared gradient of “X_1”, new accumulated squared gradient of “X_2”].

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(double): Constrain input types to float scalars.

  • -
  • T2 tensor(int64): Constrain input types to 64-bit integer scalars.

  • -
  • T3 tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAdam#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAdam(*args, **kwargs)#
-

Version

-

Onnx name: Adam

-

This version of the operator has been available since version 1 of domain ai.onnx.preview.training.

-

Summary

-

Compute one iteration of Adam, a stochastic gradient based optimization algorithm. This operator can conduct the optimization of multiple tensor variables.

-

Let’s define the behavior of this operator. First of all, Adam requires some parameters:

-
-
    -
  • The learning-rate “R”.

  • -
  • The update count “T”. That is, the number of training iterations conducted.

  • -
  • A L2-norm regularization coefficient “norm_coefficient”.

  • -
  • A small constant “epsilon” to avoid dividing-by-zero.

  • -
  • Two coefficients, “alpha” and “beta”.

  • -
-
-

At each Adam iteration, the optimized tensors are moved along a direction computed based on their exponentially-averaged historical gradient and exponentially-averaged historical squared gradient. Assume that only a tensor “X” is being optimized. The rest of the required information is

-
-
    -
  • the value of “X”,

  • -
  • “X“‘s gradient (denoted by “G”),

  • -
  • “X“‘s exponentially-averaged historical gradient (denoted by “V”), and

  • -
  • “X“‘s exponentially-averaged historical squared gradient (denoted by “H”).

  • -
-
-

Some of those parameters are passed into this operator as input tensors and others are stored as this operator’s attributes. Specifically, this operator’s input tensor list is [“R”, “T”, “X”, “G”, “V”, “H”]. That is, “R” is the first input, “T” is the second input, and so on. Other parameters are given as attributes because they are constants. Moreover, the corresponding output tensors are

-
-
    -
  • the new value of “X” (called “X_new”),

  • -
  • the new exponentially-averaged historical gradient (denoted by “V_new”), and

  • -
  • the new exponentially-averaged historical squared gradient (denoted by “H_new”).

  • -
-
-

Those outputs are computed following the pseudo code below.

-

Let “+”, “-”, “*”, and “/” be element-wise arithmetic operations with numpy-style broadcasting support. The pseudo code to compute those outputs is:

-
-

// Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm.
G_regularized = norm_coefficient * X + G

// Update exponentially-averaged historical gradient.
V_new = alpha * V + (1 - alpha) * G_regularized

// Update exponentially-averaged historical squared gradient.
H_new = beta * H + (1 - beta) * G_regularized * G_regularized

// Compute the element-wise square-root of H_new. V_new will be element-wisely
// divided by H_sqrt for a better update direction.
H_sqrt = Sqrt(H_new) + epsilon

// Compute learning-rate. Note that "alpha**T"/"beta**T" is alpha's/beta's T-th power.
R_adjusted = T > 0 ? R * Sqrt(1 - beta**T) / (1 - alpha**T) : R

// Compute new value of "X".
X_new = X - R_adjusted * V_new / H_sqrt

// Post-update regularization.
X_final = (1 - norm_coefficient_post) * X_new

If there are multiple inputs to be optimized, the pseudo code will be applied independently to each of them.
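As with ADAGRAD, the update is easy to reproduce in numpy. A sketch with made-up values, not part of the specification:

import numpy as np

R, T = 0.01, 1
alpha, beta, epsilon = 0.9, 0.999, 1e-6
norm_coefficient, norm_coefficient_post = 0.0, 0.0

X = np.array([1.0, -2.0], dtype=np.float32)
G = np.array([0.1, -0.3], dtype=np.float32)
V = np.zeros_like(X)   # exponentially-averaged gradient
H = np.zeros_like(X)   # exponentially-averaged squared gradient

G_regularized = norm_coefficient * X + G
V_new = alpha * V + (1 - alpha) * G_regularized
H_new = beta * H + (1 - beta) * G_regularized * G_regularized
H_sqrt = np.sqrt(H_new) + epsilon
R_adjusted = R * np.sqrt(1 - beta**T) / (1 - alpha**T) if T > 0 else R
X_new = X - R_adjusted * V_new / H_sqrt
X_final = (1 - norm_coefficient_post) * X_new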

-

Attributes

-
    -
  • alpha: Coefficient of previously accumulated gradient in running average. Default to 0.9. Default value is -name: "alpha" f: 0.8999999761581421 type: FLOAT

  • -
  • beta: Coefficient of previously accumulated squared-gradient in running average. Default to 0.999. Default value is -name: "beta" f: 0.9990000128746033 type: FLOAT

  • -
  • epsilon: Small scalar to avoid dividing by zero. Default value is -name: "epsilon" f: 9.999999974752427e-07 type: FLOAT

  • -
  • norm_coefficient: Regularization coefficient of 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization. Default value is -name: "norm_coefficient" f: 0.0 type: FLOAT

  • -
  • norm_coefficient_post: Regularization coefficient of 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization. Default value is -name: "norm_coefficient_post" f: 0.0 type: FLOAT

  • -
-

Inputs

-

Between 3 and 2147483647 inputs.

-
    -
  • R (heterogeneous)T1: The initial learning rate.

  • -
  • T (heterogeneous)T2: The update count of “X”. It should be a scalar.

  • -
  • inputs (variadic)T3: The tensors to be optimized, followed by their respective gradients, followed by their respective accumulated gradients (aka momentum), followed by their respective accumulated squared gradients. For example, to optimize tensors “X_1” and “X_2,”, the input list would be [“X_1”, “X_2”, gradient of “X_1”, gradient of “X_2”, accumulated gradient of “X_1”, accumulated gradient of “X_2”, accumulated squared gradient of “X_1”, accumulated squared gradient of “X_2”].

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic)T3: New values of optimized tensors, followed by their respective new accumulated gradients, followed by their respective new accumulated squared gradients. For example, if two tensors “X_1” and “X_2” are optimized, the outputs list would be [new value of “X_1”, new value of “X_2”, new accumulated gradient of “X_1”, new accumulated gradient of “X_2”, new accumulated squared gradient of “X_1”, new accumulated squared gradient of “X_2”].

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(double): Constrain input types to float scalars.

  • -
  • T2 tensor(int64): Constrain input types to 64-bit integer scalars.

  • -
  • T3 tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAdam_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAdam_1(*args, **kwargs)#
-

Version

-

Onnx name: Adam

-

This version of the operator has been available since -version 1 of domain ai.onnx.preview.training.

-

Summary

-

Compute one iteration of Adam, a stochastic gradient based optimization -algorithm. This operator can conduct the optimization of multiple tensor variables.

-

Let’s define the behavior of this operator. First of all, Adam requires -some parameters:

-
-
    -
  • The learning-rate “R”.

  • -
  • The update count “T”. That is, the number of training iterations conducted.

  • -
  • A L2-norm regularization coefficient “norm_coefficient”.

  • -
  • A small constant “epsilon” to avoid dividing-by-zero.

  • -
  • Two coefficients, “alpha” and “beta”.

  • -
-
-

At each Adam iteration, the optimized tensors are moved along a direction -computed based on their exponentially-averaged historical gradient and -exponentially-averaged historical squared gradient. Assume that only a tensor -“X” is being optimized. The rest of required information is

-
-
    -
  • the value of “X”,

  • -
  • “X“‘s gradient (denoted by “G”),

  • -
  • “X“‘s exponentially-averaged historical gradient (denoted by “V”), and

  • -
  • “X“‘s exponentially-averaged historical squared gradient (denoted by “H”).

  • -
-
-

Some of those parameters are passed into this operator as input tensors and others -are stored as this operator’s attributes. Specifically, this operator’s input tensor -list is [“R”, “T”, “X”, “G”, “V”, “H”]. That is, “R” is the first input, “T” is -the second input, and so on. Other parameters are given as attributes because they -are constants. Moreover, the corresponding output tensors are

-
-
    -
  • the new value of “X” (called “X_new”),

  • -
  • the new exponentially-averaged historical gradient (denoted by “V_new”), and

  • -
  • the new exponentially-averaged historical squared gradient (denoted by “H_new”).

  • -
-
-

Those outputs are computed following the pseudo code below.

-

Let “+”, “-”, “*”, and “/” are all element-wise arithmetic operations with -numpy-style broadcasting support. The pseudo code to compute those outputs is:

-
-

// Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm. -G_regularized = norm_coefficient * X + G

-

// Update exponentially-averaged historical gradient. -V_new = alpha * V + (1 - alpha) * G_regularized

-

// Update exponentially-averaged historical squared gradient. -H_new = beta * H + (1 - beta) * G_regularized * G_regularized

-

// Compute the element-wise square-root of H_new. V_new will be element-wisely -// divided by H_sqrt for a better update direction. -H_sqrt = Sqrt(H_new) + epsilon

-

// Compute learning-rate. Note that “alpha**T”/”beta**T” is alpha’s/beta’s T-th power. -R_adjusted = T > 0 ? R * Sqrt(1 - beta**T) / (1 - alpha**T) : R

-

// Compute new value of “X”. -X_new = X - R_adjusted * V_new / H_sqrt

-

// Post-update regularization. -X_final = (1 - norm_coefficient_post) * X_new

-
-

If there are multiple inputs to be optimized, the pseudo code will be applied -independently to each of them.

-

Attributes

-
    -
  • alpha: Coefficient of previously accumulated gradient in running average. Default to 0.9. Default value is -name: "alpha" f: 0.8999999761581421 type: FLOAT

  • -
  • beta: Coefficient of previously accumulated squared-gradient in running average. Default to 0.999. Default value is -name: "beta" f: 0.9990000128746033 type: FLOAT

  • -
  • epsilon: Small scalar to avoid dividing by zero. Default value is -name: "epsilon" f: 9.999999974752427e-07 type: FLOAT

  • -
  • norm_coefficient: Regularization coefficient of 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization. Default value is -name: "norm_coefficient" f: 0.0 type: FLOAT

  • -
  • norm_coefficient_post: Regularization coefficient of 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization. Default value is -name: "norm_coefficient_post" f: 0.0 type: FLOAT

  • -
-

Inputs

-

Between 3 and 2147483647 inputs.

-
    -
  • R (heterogeneous)T1: The initial learning rate.

  • -
  • T (heterogeneous)T2: The update count of “X”. It should be a scalar.

  • -
  • inputs (variadic)T3: The tensors to be optimized, followed by their respective gradients, followed by their respective accumulated gradients (aka momentum), followed by their respective accumulated squared gradients. For example, to optimize tensors “X_1” and “X_2,”, the input list would be [“X_1”, “X_2”, gradient of “X_1”, gradient of “X_2”, accumulated gradient of “X_1”, accumulated gradient of “X_2”, accumulated squared gradient of “X_1”, accumulated squared gradient of “X_2”].

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic)T3: New values of optimized tensors, followed by their respective new accumulated gradients, followed by their respective new accumulated squared gradients. For example, if two tensors “X_1” and “X_2” are optimized, the outputs list would be [new value of “X_1”, new value of “X_2”, new accumulated gradient of “X_1”, new accumulated gradient of “X_2”, new accumulated squared gradient of “X_1”, new accumulated squared gradient of “X_2”].

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(double): Constrain input types to float scalars.

  • -
  • T2 tensor(int64): Constrain input types to 64-bit integer scalars.

  • -
  • T3 tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAdd#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAdd(*args, **kwargs)#
-

Version

-

Onnx name: Add

-

This version of the operator has been available since version 14.

-

Summary

-

Performs element-wise binary addition (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

-

Inputs

-
    -
  • A (heterogeneous)T: First operand.

  • -
  • B (heterogeneous)T: Second operand.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same element type as two inputs

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.
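As a hedged sketch of how Add composes with other algebra nodes (the names, the constants, and the ability to pass a numpy array as a constant second operand are illustrative assumptions, not statements from the reference):

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxAbs, OnnxAdd

x = np.array([[-1.0, 2.0], [3.0, -4.0]], dtype=np.float32)
bias = np.array([10.0, 20.0], dtype=np.float32)     # broadcast over rows

# Abs followed by Add with a broadcast constant, converted to a standalone model.
node = OnnxAdd(OnnxAbs('X', op_version=14), bias, op_version=14, output_names=['Y'])
model = node.to_onnx({'X': x})
# Expected result for these inputs: np.abs(x) + bias (numpy-style broadcasting).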


OnnxAdd_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAdd_1(*args, **kwargs)#
-

Version

-

Onnx name: Add

-

This version of the operator has been available since version 1.

-

Summary

-

Performs element-wise binary addition (with limited broadcast support).

-

If necessary the right-hand-side argument will be broadcasted to match the shape of the left-hand-side argument. When broadcasting is specified, the second tensor can either be of element size 1 (including a scalar tensor and any tensor with rank equal to or smaller than the first tensor), or have its shape as a contiguous subset of the first tensor’s shape. The start of the mutually equal shape is specified by the argument “axis”, and if it is not set, suffix matching is assumed. 1-dim expansion doesn’t work yet.

-

For example, the following tensor shapes are supported (with broadcast=1):

-
-

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is a 1-element tensor
shape(A) = (2, 3, 4, 5), shape(B) = (5,)
shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

-
-

Attribute broadcast=1 needs to be passed to enable broadcasting.
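Informally, the legacy axis-based broadcast can be reproduced in numpy by aligning B with a contiguous block of A’s shape. A sketch of the axis=1 case from the list above; the equivalence is an illustration, not text from the specification:

import numpy as np

A = np.random.rand(2, 3, 4, 5).astype(np.float32)
B = np.random.rand(3, 4).astype(np.float32)

# B's shape (3, 4) matches A's shape starting at axis 1, so it is expanded
# to (1, 3, 4, 1) before the usual element-wise addition.
C = A + B.reshape(1, 3, 4, 1)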

-

Attributes

-
    -
  • -
  • broadcast: Pass 1 to enable broadcasting Default value is -name: "broadcast" i: 0 type: INT

  • -
  • -
-

Inputs

-
    -
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • -
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAdd_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAdd_13(*args, **kwargs)#
-

Version

-

Onnx name: Add

-

This version of the operator has been available since -version 13.

-

Summary

-

Performs element-wise binary addition (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First operand.

  • -
  • B (heterogeneous)T: Second operand.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same element type as two inputs

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxAdd_14#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAdd_14(*args, **kwargs)#
-

Version

-

Onnx name: Add

-

This version of the operator has been available since -version 14.

-

Summary

-

Performs element-wise binary addition (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

-

Inputs

-
    -
  • A (heterogeneous)T: First operand.

  • -
  • B (heterogeneous)T: Second operand.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same element type as two inputs

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxAdd_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAdd_6(*args, **kwargs)#
-

Version

-

Onnx name: Add

-

This version of the operator has been available since -version 6.

-

Summary

-

Performs element-wise binary addition (with limited broadcast support).

-

If necessary the right-hand-side argument will be broadcasted to match the -shape of left-hand-side argument. When broadcasting is specified, the second -tensor can either be of element size 1 (including a scalar tensor and any -tensor with rank equal to or smaller than the first tensor), or having its -shape as a contiguous subset of the first tensor’s shape. The starting of the -mutually equal shape is specified by the argument “axis”, and if it is not set, -suffix matching is assumed. 1-dim expansion doesn’t work yet.

-

For example, the following tensor shapes are supported (with broadcast=1):

-
-

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor -shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor -shape(A) = (2, 3, 4, 5), shape(B) = (5,) -shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) -shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 -shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

-
-

Attribute broadcast=1 needs to be passed to enable broadcasting.

-

Attributes

-
    -
  • -
  • broadcast: Pass 1 to enable broadcasting Default value is -name: "broadcast" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • -
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxAdd_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAdd_7(*args, **kwargs)#
-

Version

-

Onnx name: Add

-

This version of the operator has been available since -version 7.

-

Summary

-

Performs element-wise binary addition (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First operand.

  • -
  • B (heterogeneous)T: Second operand.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same element type as two inputs

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxAnd#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAnd(*args, **kwargs)#
-

Version

-

Onnx name: And

-

This version of the operator has been available since version 7.

-

Summary

-

Returns the tensor resulting from performing the and logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(bool): Constrains input to boolean tensor.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.
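A short numpy illustration of the element-wise semantics with broadcasting (values are made up):

import numpy as np

A = np.array([[True, False], [True, True]])
B = np.array([True, False])                 # broadcast across rows
C = np.logical_and(A, B)                    # element-wise, same result as And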


OnnxAnd_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAnd_1(*args, **kwargs)#
-

Version

-

Onnx name: And

-

This version of the operator has been available since -version 1.

-

Summary

-

Returns the tensor resulted from performing the and logical operation -elementwise on the input tensors A and B.

-

If broadcasting is enabled, the right-hand-side argument will be broadcasted -to match the shape of left-hand-side argument. See the doc of Add for a -detailed description of the broadcasting rules.

-

Attributes

-
    -
  • -
  • broadcast: Enable broadcasting Default value is -name: "broadcast" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: Left input tensor for the logical operator.

  • -
  • B (heterogeneous)T: Right input tensor for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(bool): Constrains input to boolean tensor.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxAnd_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAnd_7(*args, **kwargs)#
-

Version

-

Onnx name: And

-

This version of the operator has been available since -version 7.

-

Summary

-

Returns the tensor resulted from performing the and logical operation -elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(bool): Constrains input to boolean tensor.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxArgMax#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxArgMax(*args, **kwargs)#
-

Version

-

Onnx name: ArgMax

-

This version of the operator has been available since version 13.

-

Summary

-

Computes the indices of the max elements of the input tensor’s elements along the provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned. If select_last_index is True (default False), the index of the last occurrence of the max is selected if the max appears more than once in the input. Otherwise the index of the first occurrence is selected. The type of the output tensor is integer.
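The keepdims and select_last_index behaviour can be mirrored in numpy. A sketch, with the caveat that numpy has no select_last_index, so the last occurrence is recovered by flipping the axis:

import numpy as np

data = np.array([[1, 5, 5], [7, 2, 7]])

idx = np.argmax(data, axis=1)                        # keepdims=0 -> shape (2,)
idx_keep = np.argmax(data, axis=1, keepdims=True)    # keepdims=1 -> shape (2, 1), numpy >= 1.22

# select_last_index=1: index of the last occurrence of the maximum along axis 1.
last = data.shape[1] - 1 - np.argmax(data[:, ::-1], axis=1)   # [2, 2]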

-

Attributes

-
    -
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
  • select_last_index: Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index). Default value is -name: "select_last_index" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxArgMax_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxArgMax_1(*args, **kwargs)#
-

Version

-

Onnx name: ArgMax

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes the indices of the max elements of the input tensor’s element along the -provided axis. The resulted tensor has the same rank as the input if keepdims equal 1. -If keepdims equal 0, then the resulted tensor have the reduced dimension pruned. -The type of the output tensor is integer.

-

Attributes

-
    -
  • axis: The axis in which to compute the arg indices. Default value is -name: "axis" i: 0 type: INT

  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxArgMax_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxArgMax_11(*args, **kwargs)#
-

Version

-

Onnx name: ArgMax

-

This version of the operator has been available since -version 11.

-

Summary

-

Computes the indices of the max elements of the input tensor’s element along the -provided axis. The resulting tensor has the same rank as the input if keepdims equal 1. -If keepdims equal 0, then the resulting tensor have the reduced dimension pruned. -The type of the output tensor is integer.

-

Attributes

-
    -
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxArgMax_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxArgMax_12(*args, **kwargs)#
-

Version

-

Onnx name: ArgMax

-

This version of the operator has been available since -version 12.

-

Summary

-

Computes the indices of the max elements of the input tensor’s element along the -provided axis. The resulting tensor has the same rank as the input if keepdims equal 1. -If keepdims equal 0, then the resulting tensor have the reduced dimension pruned. -If select_last_index is True (default False), the index of the last occurrence of the max -is selected if the max appears more than once in the input. Otherwise the index of the -first occurrence is selected. -The type of the output tensor is integer.

-

Attributes

-
    -
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
  • select_last_index: Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index). Default value is -name: "select_last_index" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxArgMax_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxArgMax_13(*args, **kwargs)#
-

Version

-

Onnx name: ArgMax

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the indices of the max elements of the input tensor’s element along the -provided axis. The resulting tensor has the same rank as the input if keepdims equal 1. -If keepdims equal 0, then the resulting tensor have the reduced dimension pruned. -If select_last_index is True (default False), the index of the last occurrence of the max -is selected if the max appears more than once in the input. Otherwise the index of the -first occurrence is selected. -The type of the output tensor is integer.

-

Attributes

-
    -
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
  • select_last_index: Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index). Default value is -name: "select_last_index" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxArgMin#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxArgMin(*args, **kwargs)#
-

Version

-

Onnx name: ArgMin

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the indices of the min elements of the input tensor’s element along the -provided axis. The resulting tensor has the same rank as the input if keepdims equal 1. -If keepdims equal 0, then the resulting tensor have the reduced dimension pruned. -If select_last_index is True (default False), the index of the last occurrence of the min -is selected if the min appears more than once in the input. Otherwise the index of the -first occurrence is selected. -The type of the output tensor is integer.

-

Attributes

-
    -
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
  • select_last_index: Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index). Default value is -name: "select_last_index" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxArgMin_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxArgMin_1(*args, **kwargs)#
-

Version

-

Onnx name: ArgMin

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes the indices of the min elements of the input tensor’s element along the -provided axis. The resulted tensor has the same rank as the input if keepdims equal 1. -If keepdims equal 0, then the resulted tensor have the reduced dimension pruned. -The type of the output tensor is integer.

-

Attributes

-
    -
  • axis: The axis in which to compute the arg indices. Default value is -name: "axis" i: 0 type: INT

  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxArgMin_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxArgMin_11(*args, **kwargs)#
-

Version

-

Onnx name: ArgMin

-

This version of the operator has been available since -version 11.

-

Summary

-

Computes the indices of the min elements of the input tensor’s element along the -provided axis. The resulting tensor has the same rank as the input if keepdims equal 1. -If keepdims equal 0, then the resulting tensor have the reduced dimension pruned. -The type of the output tensor is integer.

-

Attributes

-
    -
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxArgMin_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxArgMin_12(*args, **kwargs)#
-

Version

-

Onnx name: ArgMin

-

This version of the operator has been available since -version 12.

-

Summary

-

Computes the indices of the min elements of the input tensor’s element along the -provided axis. The resulting tensor has the same rank as the input if keepdims equal 1. -If keepdims equal 0, then the resulting tensor have the reduced dimension pruned. -If select_last_index is True (default False), the index of the last occurrence of the min -is selected if the min appears more than once in the input. Otherwise the index of the -first occurrence is selected. -The type of the output tensor is integer.

-

Attributes

-
    -
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
  • select_last_index: Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index). Default value is -name: "select_last_index" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxArgMin_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxArgMin_13(*args, **kwargs)#
-

Version

-

Onnx name: ArgMin

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the indices of the min elements of the input tensor’s element along the -provided axis. The resulting tensor has the same rank as the input if keepdims equal 1. -If keepdims equal 0, then the resulting tensor have the reduced dimension pruned. -If select_last_index is True (default False), the index of the last occurrence of the min -is selected if the min appears more than once in the input. Otherwise the index of the -first occurrence is selected. -The type of the output tensor is integer.

-

Attributes

-
    -
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
  • select_last_index: Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index). Default value is -name: "select_last_index" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxArrayFeatureExtractor#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxArrayFeatureExtractor(*args, **kwargs)#
-

Version

-

Onnx name: ArrayFeatureExtractor

-

This version of the operator has been available since version 1 of domain ai.onnx.ml.

-

Summary

-

Select elements of the input tensor based on the indices passed.

-

The indices are applied to the last axes of the tensor.
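Informally, this corresponds to indexing along the last axis. A numpy sketch with made-up values:

import numpy as np

X = np.array([[1.0, 2.0, 3.0, 4.0],
              [5.0, 6.0, 7.0, 8.0]], dtype=np.float32)
indices = np.array([0, 2], dtype=np.int64)

Z = np.take(X, indices, axis=-1)    # columns 0 and 2 of every row -> shape (2, 2)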

-

Inputs

-
    -
  • X (heterogeneous)T: Data to be selected

  • -
  • Y (heterogeneous)tensor(int64): The indices, based on 0 as the first index of any dimension.

  • -
-

Outputs

-
    -
  • Z (heterogeneous)T: Selected output data as an array

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(double), tensor(int64), tensor(int32), tensor(string): The input must be a tensor of a numeric type or string. The output will be of the same tensor type.

  • -
-
- -
-
-
-
-

OnnxArrayFeatureExtractor_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxArrayFeatureExtractor_1(*args, **kwargs)#
-

Version

-

Onnx name: ArrayFeatureExtractor

-

This version of the operator has been available since -version 1 of domain ai.onnx.ml.

-

Summary

-

Select elements of the input tensor based on the indices passed.

-

The indices are applied to the last axes of the tensor.

-

Inputs

-
    -
  • X (heterogeneous)T: Data to be selected

  • -
  • Y (heterogeneous)tensor(int64): The indices, based on 0 as the first index of any dimension.

  • -
-

Outputs

-
    -
  • Z (heterogeneous)T: Selected output data as an array

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(double), tensor(int64), tensor(int32), tensor(string): The input must be a tensor of a numeric type or string. The output will be of the same tensor type.

  • -
-
- -
-
-
-
-

OnnxAsin#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAsin(*args, **kwargs)#
-

Version

-

Onnx name: Asin

-

This version of the operator has been available since -version 7.

-

Summary

-

Calculates the arcsine (inverse of sine) of the given input tensor, element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The arcsine of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAsin_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAsin_7(*args, **kwargs)#
-

Version

-

Onnx name: Asin

-

This version of the operator has been available since -version 7.

-

Summary

-

Calculates the arcsine (inverse of sine) of the given input tensor, element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The arcsine of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAsinh#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAsinh(*args, **kwargs)#
-

Version

-

Onnx name: Asinh

-

This version of the operator has been available since -version 9.

-

Summary

-

Calculates the hyperbolic arcsine of the given input tensor element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The hyperbolic arcsine values of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAsinh_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAsinh_9(*args, **kwargs)#
-

Version

-

Onnx name: Asinh

-

This version of the operator has been available since -version 9.

-

Summary

-

Calculates the hyperbolic arcsine of the given input tensor element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The hyperbolic arcsine values of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAtan#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAtan(*args, **kwargs)#
-

Version

-

Onnx name: Atan

-

This version of the operator has been available since -version 7.

-

Summary

-

Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The arctangent of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAtan_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAtan_7(*args, **kwargs)#
-

Version

-

Onnx name: Atan

-

This version of the operator has been available since -version 7.

-

Summary

-

Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The arctangent of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAtanh#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAtanh(*args, **kwargs)#
-

Version

-

Onnx name: Atanh

-

This version of the operator has been available since -version 9.

-

Summary

-

Calculates the hyperbolic arctangent of the given input tensor element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The hyperbolic arctangent values of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAtanh_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAtanh_9(*args, **kwargs)#
-

Version

-

Onnx name: Atanh

-

This version of the operator has been available since -version 9.

-

Summary

-

Calculates the hyperbolic arctangent of the given input tensor element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The hyperbolic arctangent values of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAveragePool#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAveragePool(*args, **kwargs)#
-

Version

-

Onnx name: AveragePool

-

This version of the operator has been available since version 11.

-

Summary

-

AveragePool consumes an input tensor X and applies average pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Average pooling consists of computing the average over all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is computed as follows:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

or, if ceil_mode is enabled,

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.

auto_pad is a DEPRECATED attribute. If you are still using it, the output spatial shape will be as follows:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])

SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

And the pad shape will be as follows if SAME_UPPER or SAME_LOWER:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]

The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).
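A small helper sketch, not taken from the reference, that evaluates the output-shape formulas above for explicit padding:

import math

def avgpool_output_shape(input_shape, kernel, strides, pads, ceil_mode=False):
    # pads follows the ONNX layout [x1_begin, x2_begin, ..., x1_end, x2_end, ...],
    # so pad_shape[i] = pads[i] + pads[i + rank].
    rounding = math.ceil if ceil_mode else math.floor
    out = []
    for i, size in enumerate(input_shape):
        pad = pads[i] + pads[i + len(input_shape)]
        out.append(int(rounding((size + pad - kernel[i]) / strides[i] + 1)))
    return out

# Example: 32x32 input, 3x3 kernel, stride 2, no padding -> [15, 15]
print(avgpool_output_shape([32, 32], [3, 3], [2, 2], [0, 0, 0, 0]))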

-

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is -name: "ceil_mode" i: 0 type: INT

  • -
  • count_include_pad: Whether include pad pixels when calculating values for the edges. Default is 0, doesn’t count include pad. Default value is -name: "count_include_pad" i: 0 type: INT

  • -
  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAveragePool_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAveragePool_1(*args, **kwargs)#
-

Version

-

Onnx name: AveragePool

-

This version of the operator has been available since -version 1.

-

Summary

-

AveragePool consumes an input tensor X and applies average pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Average pooling consists of computing the average over all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is computed as follows:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.

auto_pad is a DEPRECATED attribute. If it is still used, the output spatial shape is computed as follows:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

The pad shape for SAME_UPPER or SAME_LOWER is then:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]

The output of each pooling window is divided by the number of elements, excluding padding.

-

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAveragePool_10#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAveragePool_10(*args, **kwargs)#
-

Version

-

Onnx name: AveragePool

-

This version of the operator has been available since -version 10.

-

Summary

-

AveragePool consumes an input tensor X and applies average pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Average pooling consists of computing the average over all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is computed as follows:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

or, if ceil_mode is enabled:

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.

auto_pad is a DEPRECATED attribute. If it is still used, the output spatial shape is computed as follows:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

The pad shape for SAME_UPPER or SAME_LOWER is then:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]

The output of each pooling window is divided by the number of elements (padding is excluded when the attribute count_include_pad is zero).

-

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is -name: "ceil_mode" i: 0 type: INT

  • -
  • count_include_pad: Whether include pad pixels when calculating values for the edges. Default is 0, doesn’t count include pad. Default value is -name: "count_include_pad" i: 0 type: INT

  • -
  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAveragePool_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAveragePool_11(*args, **kwargs)#
-

Version

-

Onnx name: AveragePool

-

This version of the operator has been available since -version 11.

-

Summary

-

AveragePool consumes an input tensor X and applies average pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Average pooling consists of computing the average over all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is computed as follows:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

or, if ceil_mode is enabled:

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.

auto_pad is a DEPRECATED attribute. If it is still used, the output spatial shape is computed as follows:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

The pad shape for SAME_UPPER or SAME_LOWER is then:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]

The output of each pooling window is divided by the number of elements (padding is excluded when the attribute count_include_pad is zero).

-

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is -name: "ceil_mode" i: 0 type: INT

  • -
  • count_include_pad: Whether include pad pixels when calculating values for the edges. Default is 0, doesn’t count include pad. Default value is -name: "count_include_pad" i: 0 type: INT

  • -
  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxAveragePool_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxAveragePool_7(*args, **kwargs)#
-

Version

-

Onnx name: AveragePool

-

This version of the operator has been available since -version 7.

-

Summary

-

AveragePool consumes an input tensor X and applies average pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Average pooling consists of computing the average over all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is computed as follows:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.

auto_pad is a DEPRECATED attribute. If it is still used, the output spatial shape is computed as follows:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

The pad shape for SAME_UPPER or SAME_LOWER is then:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]

The output of each pooling window is divided by the number of elements (padding is excluded when the attribute count_include_pad is zero).

-

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • count_include_pad: Whether include pad pixels when calculating values for the edges. Default is 0, doesn’t count include pad. Default value is -name: "count_include_pad" i: 0 type: INT

  • -
  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxBatchNormalization#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization(*args, **kwargs)#
-

Version

-

Onnx name: BatchNormalization

-

This version of the operator has been available since -version 15.

-

Summary

-

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. There are five required inputs: 'X', 'scale', 'B', 'input_mean' and 'input_var'. Note that 'input_mean' and 'input_var' are expected to be the estimated statistics in inference mode (training_mode=False, the default), and the running statistics in training mode (training_mode=True). Depending on the mode the operator is run in, there are multiple cases for the number of outputs, which are listed below:

Output case #1: Y, running_mean, running_var (training_mode=True)
Output case #2: Y (training_mode=False)

When training_mode=False, extra outputs are invalid. The outputs are updated as follows when training_mode=True:

running_mean = input_mean * momentum + current_mean * (1 - momentum)
running_var = input_var * momentum + current_var * (1 - momentum)

Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B

where:

current_mean = ReduceMean(X, axis=all_except_channel_index)
current_var  = ReduceVar(X, axis=all_except_channel_index)

Notice that ReduceVar refers to the population variance, and it equals
sum(sqrd(x_i - x_avg)) / N
where N is the population size (this formula does not use the sample size N - 1).

The computation of ReduceMean and ReduceVar uses float to avoid overflow for float16 inputs.

When training_mode=False:

Y = (X - input_mean) / sqrt(input_var + epsilon) * scale + B

For previous (deprecated) non-spatial cases, implementors are advised to flatten the input shape to (N x C * D1 * D2 * … * Dn) before a BatchNormalization op. This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also simply be omitted.
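
As an illustration of the inference-mode equation (training_mode=False), the following NumPy sketch applies it to an (N x C x H x W) input; it is a simplified stand-in, not the skl2onnx or onnxruntime implementation:

    import numpy as np

    def batchnorm_inference(X, scale, B, input_mean, input_var, epsilon=1e-5):
        """Y = (X - input_mean) / sqrt(input_var + epsilon) * scale + B, per channel."""
        # Reshape the per-channel statistics so they broadcast over (N, C, H, W).
        shape = (1, -1) + (1,) * (X.ndim - 2)
        mean = input_mean.reshape(shape)
        var = input_var.reshape(shape)
        return (X - mean) / np.sqrt(var + epsilon) * scale.reshape(shape) + B.reshape(shape)

    X = np.random.randn(2, 3, 4, 4).astype(np.float32)
    scale = np.ones(3, dtype=np.float32)
    B = np.zeros(3, dtype=np.float32)
    mean = X.mean(axis=(0, 2, 3))
    var = X.var(axis=(0, 2, 3))
    print(batchnorm_inference(X, scale, B, mean, var).shape)  # (2, 3, 4, 4)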

-

Attributes

-
    -
  • epsilon: The epsilon value to use to avoid division by zero. Default value is -name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • -
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum). Default value is -name: "momentum" f: 0.8999999761581421 type: FLOAT

  • -
  • training_mode: If set to true, it indicates BatchNormalization is being used for training, and outputs 1, 2, 3, and 4 would be populated. Default value is -name: "training_mode" i: 0 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size, C is the number of channels. Statistics are computed for every channel of C over N and D1 to Dn dimensions. For image data, input dimensions become (N x C x H x W). The op also accepts single dimension input of size N in which case C is assumed to be 1

  • -
  • scale (heterogeneous)T1: Scale tensor of shape (C).

  • -
  • B (heterogeneous)T1: Bias tensor of shape (C).

  • -
  • input_mean (heterogeneous)T2: running (training) or estimated (testing) mean tensor of shape (C).

  • -
  • input_var (heterogeneous)T2: running (training) or estimated (testing) variance tensor of shape (C).

  • -
-

Outputs

-

Between 1 and 3 outputs.

-
    -
  • Y (heterogeneous)T: The output tensor of the same shape as X

  • -
  • running_mean (optional, heterogeneous)T2: The running mean after the BatchNormalization operator.

  • -
  • running_var (optional, heterogeneous)T2: The running variance after the BatchNormalization operator. This op uses the population size (N) for calculating variance, and not the sample size N-1.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain scale and bias types to float tensors.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain mean and variance types to float tensors.

  • -
-
- -
-
-
-
-

OnnxBatchNormalization_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_1(*args, **kwargs)#
-

Version

-

Onnx name: BatchNormalization

-

This version of the operator has been available since -version 1.

-

Summary

-

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. Depending on the mode it is run in, there are multiple cases for the number of outputs, which are listed below:

Output case #1: Y, mean, var, saved_mean, saved_var (training mode)
Output case #2: Y (test mode)

-

Attributes

-
    -
  • -
  • epsilon: The epsilon value to use to avoid division by zero, default is 1e-5f. Default value is -name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • -
  • is_test: If set to nonzero, run spatial batch normalization in test mode, default is 0. Default value is -name: "is_test" i: 0 type: INT

  • -
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum), default is 0.9f. Default value is -name: "momentum" f: 0.8999999761581421 type: FLOAT

  • -
  • spatial: If true, compute the mean and variance across all spatial elements If false, compute the mean and variance across per feature.Default is 1. Default value is -name: "spatial" i: 1 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: The input 4-dimensional tensor of shape NCHW.

  • -
  • scale (heterogeneous)T: The scale as a 1-dimensional tensor of size C to be applied to the output.

  • -
  • B (heterogeneous)T: The bias as a 1-dimensional tensor of size C to be applied to the output.

  • -
  • mean (heterogeneous)T: The running mean (training) or the estimated mean (testing) as a 1-dimensional tensor of size C.

  • -
  • var (heterogeneous)T: The running variance (training) or the estimated variance (testing) as a 1-dimensional tensor of size C.

  • -
-

Outputs

-

Between 1 and 5 outputs.

-
    -
  • Y (heterogeneous)T: The output 4-dimensional tensor of the same shape as X.

  • -
  • mean (optional, heterogeneous)T: The running mean after the BatchNormalization operator. Must be in-place with the input mean. Should not be used for testing.

  • -
  • var (optional, heterogeneous)T: The running variance after the BatchNormalization operator. Must be in-place with the input var. Should not be used for testing.

  • -
  • saved_mean (optional, heterogeneous)T: Saved mean used during training to speed up gradient computation. Should not be used for testing.

  • -
  • saved_var (optional, heterogeneous)T: Saved variance used during training to speed up gradient computation. Should not be used for testing.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxBatchNormalization_14#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_14(*args, **kwargs)#
-

Version

-

Onnx name: BatchNormalization

-

This version of the operator has been available since -version 14.

-

Summary

-

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. There are five required inputs: 'X', 'scale', 'B', 'input_mean' and 'input_var'. Note that 'input_mean' and 'input_var' are expected to be the estimated statistics in inference mode (training_mode=False, the default), and the running statistics in training mode (training_mode=True). Depending on the mode the operator is run in, there are multiple cases for the number of outputs, which are listed below:

Output case #1: Y, running_mean, running_var (training_mode=True)
Output case #2: Y (training_mode=False)

When training_mode=False, extra outputs are invalid. The outputs are updated as follows when training_mode=True:

running_mean = input_mean * momentum + current_mean * (1 - momentum)
running_var = input_var * momentum + current_var * (1 - momentum)

Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B

where:

current_mean = ReduceMean(X, axis=all_except_channel_index)
current_var  = ReduceVar(X, axis=all_except_channel_index)

Notice that ReduceVar refers to the population variance, and it equals
sum(sqrd(x_i - x_avg)) / N
where N is the population size (this formula does not use the sample size N - 1).

When training_mode=False:

Y = (X - input_mean) / sqrt(input_var + epsilon) * scale + B

For previous (deprecated) non-spatial cases, implementors are advised to flatten the input shape to (N x C * D1 * D2 * … * Dn) before a BatchNormalization op. This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also simply be omitted.

-

Attributes

-
    -
  • epsilon: The epsilon value to use to avoid division by zero. Default value is -name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • -
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum). Default value is -name: "momentum" f: 0.8999999761581421 type: FLOAT

  • -
  • training_mode: If set to true, it indicates BatchNormalization is being used for training, and outputs 1, 2, 3, and 4 would be populated. Default value is -name: "training_mode" i: 0 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size, C is the number of channels. Statistics are computed for every channel of C over N and D1 to Dn dimensions. For image data, input dimensions become (N x C x H x W). The op also accepts single dimension input of size N in which case C is assumed to be 1

  • -
  • scale (heterogeneous)T: Scale tensor of shape (C).

  • -
  • B (heterogeneous)T: Bias tensor of shape (C).

  • -
  • input_mean (heterogeneous)U: running (training) or estimated (testing) mean tensor of shape (C).

  • -
  • input_var (heterogeneous)U: running (training) or estimated (testing) variance tensor of shape (C).

  • -
-

Outputs

-

Between 1 and 3 outputs.

-
    -
  • Y (heterogeneous)T: The output tensor of the same shape as X

  • -
  • running_mean (optional, heterogeneous)U: The running mean after the BatchNormalization operator.

  • -
  • running_var (optional, heterogeneous)U: The running variance after the BatchNormalization operator. This op uses the population size (N) for calculating variance, and not the sample size N-1.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
  • U tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain mean and variance types to float tensors. It allows all float type for U.

  • -
-
- -
-
-
-
-

OnnxBatchNormalization_15#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_15(*args, **kwargs)#
-

Version

-

Onnx name: BatchNormalization

-

This version of the operator has been available since -version 15.

-

Summary

-

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. There are five required inputs: 'X', 'scale', 'B', 'input_mean' and 'input_var'. Note that 'input_mean' and 'input_var' are expected to be the estimated statistics in inference mode (training_mode=False, the default), and the running statistics in training mode (training_mode=True). Depending on the mode the operator is run in, there are multiple cases for the number of outputs, which are listed below:

Output case #1: Y, running_mean, running_var (training_mode=True)
Output case #2: Y (training_mode=False)

When training_mode=False, extra outputs are invalid. The outputs are updated as follows when training_mode=True:

running_mean = input_mean * momentum + current_mean * (1 - momentum)
running_var = input_var * momentum + current_var * (1 - momentum)

Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B

where:

current_mean = ReduceMean(X, axis=all_except_channel_index)
current_var  = ReduceVar(X, axis=all_except_channel_index)

Notice that ReduceVar refers to the population variance, and it equals
sum(sqrd(x_i - x_avg)) / N
where N is the population size (this formula does not use the sample size N - 1).

The computation of ReduceMean and ReduceVar uses float to avoid overflow for float16 inputs.

When training_mode=False:

Y = (X - input_mean) / sqrt(input_var + epsilon) * scale + B

For previous (deprecated) non-spatial cases, implementors are advised to flatten the input shape to (N x C * D1 * D2 * … * Dn) before a BatchNormalization op. This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also simply be omitted.

-

Attributes

-
    -
  • epsilon: The epsilon value to use to avoid division by zero. Default value is -name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • -
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum). Default value is -name: "momentum" f: 0.8999999761581421 type: FLOAT

  • -
  • training_mode: If set to true, it indicates BatchNormalization is being used for training, and outputs 1, 2, 3, and 4 would be populated. Default value is -name: "training_mode" i: 0 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size, C is the number of channels. Statistics are computed for every channel of C over N and D1 to Dn dimensions. For image data, input dimensions become (N x C x H x W). The op also accepts single dimension input of size N in which case C is assumed to be 1

  • -
  • scale (heterogeneous)T1: Scale tensor of shape (C).

  • -
  • B (heterogeneous)T1: Bias tensor of shape (C).

  • -
  • input_mean (heterogeneous)T2: running (training) or estimated (testing) mean tensor of shape (C).

  • -
  • input_var (heterogeneous)T2: running (training) or estimated (testing) variance tensor of shape (C).

  • -
-

Outputs

-

Between 1 and 3 outputs.

-
    -
  • Y (heterogeneous)T: The output tensor of the same shape as X

  • -
  • running_mean (optional, heterogeneous)T2: The running mean after the BatchNormalization operator.

  • -
  • running_var (optional, heterogeneous)T2: The running variance after the BatchNormalization operator. This op uses the population size (N) for calculating variance, and not the sample size N-1.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain scale and bias types to float tensors.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain mean and variance types to float tensors.

  • -
-
- -
-
-
-
-

OnnxBatchNormalization_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_6(*args, **kwargs)#
-

Version

-

Onnx name: BatchNormalization

-

This version of the operator has been available since -version 6.

-

Summary

-

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. Depending on the mode it is run in, there are multiple cases for the number of outputs, which are listed below:

Output case #1: Y, mean, var, saved_mean, saved_var (training mode)
Output case #2: Y (test mode)

-

Attributes

-
    -
  • epsilon: The epsilon value to use to avoid division by zero, default is 1e-5f. Default value is -name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • -
  • is_test: If set to nonzero, run spatial batch normalization in test mode, default is 0. Default value is -name: "is_test" i: 0 type: INT

  • -
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum), default is 0.9f. Default value is -name: "momentum" f: 0.8999999761581421 type: FLOAT

  • -
  • spatial: If true, compute the mean and variance across all spatial elements If false, compute the mean and variance across per feature.Default is 1. Default value is -name: "spatial" i: 1 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
  • scale (heterogeneous)T: The scale as a 1-dimensional tensor of size C to be applied to the output.

  • -
  • B (heterogeneous)T: The bias as a 1-dimensional tensor of size C to be applied to the output.

  • -
  • mean (heterogeneous)T: The running mean (training) or the estimated mean (testing) as a 1-dimensional tensor of size C.

  • -
  • var (heterogeneous)T: The running variance (training) or the estimated variance (testing) as a 1-dimensional tensor of size C.

  • -
-

Outputs

-

Between 1 and 5 outputs.

-
    -
  • Y (heterogeneous)T: The output tensor of the same shape as X.

  • -
  • mean (optional, heterogeneous)T: The running mean after the BatchNormalization operator. Must be in-place with the input mean. Should not be used for testing.

  • -
  • var (optional, heterogeneous)T: The running variance after the BatchNormalization operator. Must be in-place with the input var. Should not be used for testing.

  • -
  • saved_mean (optional, heterogeneous)T: Saved mean used during training to speed up gradient computation. Should not be used for testing.

  • -
  • saved_var (optional, heterogeneous)T: Saved variance used during training to speed up gradient computation. Should not be used for testing.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxBatchNormalization_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_7(*args, **kwargs)#
-

Version

-

Onnx name: BatchNormalization

-

This version of the operator has been available since -version 7.

-

Summary

-

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. Depending on the mode it is run in, there are multiple cases for the number of outputs, which are listed below:

Output case #1: Y, mean, var, saved_mean, saved_var (training mode)
Output case #2: Y (test mode)

-
-

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-
-

Attributes

-
    -
  • epsilon: The epsilon value to use to avoid division by zero. Default value is -name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • -
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum). Default value is -name: "momentum" f: 0.8999999761581421 type: FLOAT

  • -
  • spatial: If true, compute the mean and variance across per activation. If false, compute the mean and variance across per feature over each mini-batch. Default value is -name: "spatial" i: 1 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
  • scale (heterogeneous)T: If spatial is true, the dimension of scale is (C). If spatial is false, the dimensions of scale are (C x D1 x … x Dn)

  • -
  • B (heterogeneous)T: If spatial is true, the dimension of bias is (C). If spatial is false, the dimensions of bias are (C x D1 x … x Dn)

  • -
  • mean (heterogeneous)T: If spatial is true, the dimension of the running mean (training) or the estimated mean (testing) is (C). If spatial is false, the dimensions of the running mean (training) or the estimated mean (testing) are (C x D1 x … x Dn).

  • -
  • var (heterogeneous)T: If spatial is true, the dimension of the running variance(training) or the estimated variance (testing) is (C). If spatial is false, the dimensions of the running variance(training) or the estimated variance (testing) are (C x D1 x … x Dn).

  • -
-

Outputs

-

Between 1 and 5 outputs.

-
    -
  • Y (heterogeneous)T: The output tensor of the same shape as X

  • -
  • mean (optional, heterogeneous)T: The running mean after the BatchNormalization operator.

  • -
  • var (optional, heterogeneous)T: The running variance after the BatchNormalization operator.

  • -
  • saved_mean (optional, heterogeneous)T: Saved mean used during training to speed up gradient computation.

  • -
  • saved_var (optional, heterogeneous)T: Saved variance used during training to speed up gradient computation.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxBatchNormalization_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_9(*args, **kwargs)#
-

Version

-

Onnx name: BatchNormalization

-

This version of the operator has been available since -version 9.

-

Summary

-

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. Depending on the mode it is run in, there are multiple cases for the number of outputs, which are listed below:

Output case #1: Y, mean, var, saved_mean, saved_var (training mode)
Output case #2: Y (test mode)

For previous (deprecated) non-spatial cases, implementors are advised to flatten the input shape to (N x C*D1*D2 ..*Dn) before a BatchNormalization op. This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also simply be omitted.

-

Attributes

-
    -
  • epsilon: The epsilon value to use to avoid division by zero. Default value is -name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • -
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum). Default value is -name: "momentum" f: 0.8999999761581421 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size, C is the number of channels. Statistics are computed for every channel of C over N and D1 to Dn dimensions. For image data, input dimensions become (N x C x H x W). The op also accepts single dimension input of size N in which case C is assumed to be 1

  • -
  • scale (heterogeneous)T: Scale tensor of shape (C).

  • -
  • B (heterogeneous)T: Bias tensor of shape (C).

  • -
  • mean (heterogeneous)T: running (training) or estimated (testing) mean tensor of shape (C).

  • -
  • var (heterogeneous)T: running (training) or estimated (testing) variance tensor of shape (C).

  • -
-

Outputs

-

Between 1 and 5 outputs.

-
    -
  • Y (heterogeneous)T: The output tensor of the same shape as X

  • -
  • mean (optional, heterogeneous)T: The running mean after the BatchNormalization operator.

  • -
  • var (optional, heterogeneous)T: The running variance after the BatchNormalization operator.

  • -
  • saved_mean (optional, heterogeneous)T: Saved mean used during training to speed up gradient computation.

  • -
  • saved_var (optional, heterogeneous)T: Saved variance used during training to speed up gradient computation.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxBernoulli#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBernoulli(*args, **kwargs)#
-

Version

-

Onnx name: Bernoulli

-

This version of the operator has been available since -version 15.

-

Summary

-

Draws binary random numbers (0 or 1) from a Bernoulli distribution. The input tensor should be a tensor containing probabilities p (a value in the range [0, 1]) to be used for drawing the binary random number, where an output of 1 is produced with probability p and an output of 0 is produced with probability (1 - p).

This operator is non-deterministic and may not produce the same values in different implementations (even if a seed is specified).
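
A minimal NumPy sketch of the sampling rule described above (each element is 1 with probability p, 0 otherwise); this only mimics the semantics and is not how a runtime implements the operator:

    import numpy as np

    rng = np.random.default_rng(0)
    p = np.array([0.1, 0.5, 0.9], dtype=np.float32)  # input probabilities in [0, 1]

    # Each output element is 1 with probability p and 0 with probability 1 - p.
    sample = (rng.random(p.shape) < p).astype(p.dtype)
    print(sample)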

-

Attributes

-
    -
  • -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: All values in input have to be in the range:[0, 1].

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: The returned output tensor only has values 0 or 1, same shape as input tensor.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bool): Constrain output types to all numeric tensors and bool tensors.

  • -
-
- -
-
-
-
-

OnnxBernoulli_15#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBernoulli_15(*args, **kwargs)#
-

Version

-

Onnx name: Bernoulli

-

This version of the operator has been available since -version 15.

-

Summary

-

Draws binary random numbers (0 or 1) from a Bernoulli distribution. The input tensor should be a tensor containing probabilities p (a value in the range [0, 1]) to be used for drawing the binary random number, where an output of 1 is produced with probability p and an output of 0 is produced with probability (1 - p).

This operator is non-deterministic and may not produce the same values in different implementations (even if a seed is specified).

-

Attributes

-
    -
  • -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: All values in input have to be in the range:[0, 1].

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: The returned output tensor only has values 0 or 1, same shape as input tensor.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bool): Constrain output types to all numeric tensors and bool tensors.

  • -
-
- -
-
-
-
-

OnnxBinarizer#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBinarizer(*args, **kwargs)#
-

Version

-

Onnx name: Binarizer

-

This version of the operator has been available since -version 1 of domain ai.onnx.ml.

-

Summary

-

Maps the values of the input tensor to either 0 or 1, element-wise, based on the outcome of a comparison against a threshold value.
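
As a sketch of this element-wise thresholding (using the threshold attribute described under Attributes below), assuming a NumPy input; the helper name binarize is illustrative only:

    import numpy as np

    def binarize(X, threshold=0.0):
        """Values strictly greater than threshold map to 1, all others to 0."""
        return (X > threshold).astype(X.dtype)

    X = np.array([[-1.5, 0.0, 2.0], [0.3, 0.25, 5.0]], dtype=np.float32)
    print(binarize(X, threshold=0.25))  # [[0. 0. 1.] [1. 0. 1.]]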

-

Attributes

-
    -
  • threshold: Values greater than this are mapped to 1, others to 0. Default value is -name: "threshold" f: 0.0 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Data to be binarized

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Binarized output data

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type. The output will be of the same tensor type.

  • -
-
- -
-
-
-
-

OnnxBinarizer_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBinarizer_1(*args, **kwargs)#
-

Version

-

Onnx name: Binarizer

-

This version of the operator has been available since -version 1 of domain ai.onnx.ml.

-

Summary

-

Maps the values of the input tensor to either 0 or 1, element-wise, based on the outcome of a comparison against a threshold value.

-

Attributes

-
    -
  • threshold: Values greater than this are mapped to 1, others to 0. Default value is -name: "threshold" f: 0.0 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Data to be binarized

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Binarized output data

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type. The output will be of the same tensor type.

  • -
-
- -
-
-
-
-

OnnxBitShift#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBitShift(*args, **kwargs)#
-

Version

-

Onnx name: BitShift

-

This version of the operator has been available since -version 11.

-

Summary

-
-
The bitwise shift operator performs an element-wise operation. For each input element, if the attribute "direction" is "RIGHT", this operator moves its binary representation toward the right side so that the input value is effectively decreased. If the attribute "direction" is "LEFT", the bits of the binary representation move toward the left side, which results in an increase of its actual value. The input X is the tensor to be shifted and the other input Y specifies the amounts of shifting. For example, if "direction" is "RIGHT", X is [1, 4], and Y is [1, 1], the corresponding output Z would be [0, 2]. If "direction" is "LEFT" with X=[1, 2] and Y=[1, 2], the corresponding output Z would be [2, 8].

Because this operator supports Numpy-style broadcasting, X's and Y's shapes are not necessarily identical.
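
The example values above can be reproduced with NumPy's shift functions; this is only a semantic illustration, not the operator implementation:

    import numpy as np

    # direction="RIGHT": X = [1, 4] shifted by Y = [1, 1] gives [0, 2].
    print(np.right_shift(np.array([1, 4], dtype=np.uint32),
                         np.array([1, 1], dtype=np.uint32)))

    # direction="LEFT": X = [1, 2] shifted by Y = [1, 2] gives [2, 8].
    print(np.left_shift(np.array([1, 2], dtype=np.uint32),
                        np.array([1, 2], dtype=np.uint32)))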

-
-
-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: First operand, input to be shifted.

  • -
  • Y (heterogeneous)T: Second operand, amounts of shift.

  • -
-

Outputs

-
    -
  • Z (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64): Constrain input and output types to integer tensors.

  • -
-
- -
-
-
-
-

OnnxBitShift_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxBitShift_11(*args, **kwargs)#
-

Version

-

Onnx name: BitShift

-

This version of the operator has been available since -version 11.

-

Summary

-
-
The bitwise shift operator performs an element-wise operation. For each input element, if the attribute "direction" is "RIGHT", this operator moves its binary representation toward the right side so that the input value is effectively decreased. If the attribute "direction" is "LEFT", the bits of the binary representation move toward the left side, which results in an increase of its actual value. The input X is the tensor to be shifted and the other input Y specifies the amounts of shifting. For example, if "direction" is "RIGHT", X is [1, 4], and Y is [1, 1], the corresponding output Z would be [0, 2]. If "direction" is "LEFT" with X=[1, 2] and Y=[1, 2], the corresponding output Z would be [2, 8].

Because this operator supports Numpy-style broadcasting, X's and Y's shapes are not necessarily identical.

-
-
-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: First operand, input to be shifted.

  • -
  • Y (heterogeneous)T: Second operand, amounts of shift.

  • -
-

Outputs

-
    -
  • Z (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64): Constrain input and output types to integer tensors.

  • -
-
- -
-
-
-
-

OnnxCast#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCast(*args, **kwargs)#
-

Version

-

Onnx name: Cast

-

This version of the operator has been available since -version 13.

-

Summary

-

The operator casts the elements of a given input tensor to a data type specified by the 'to' argument and returns an output tensor of the same size in the converted type. The 'to' argument must be one of the data types specified in the 'DataType' enum field in the TensorProto message.

Casting from string tensors in plain (e.g., "3.14" and "1000") and scientific numeric representations (e.g., "1e-5" and "1E8") to float types is supported. For example, converting the string "100.5" to an integer may result in 100. Some string literals are reserved for special floating-point values; "+INF" (and "INF"), "-INF", and "NaN" denote positive infinity, negative infinity, and not-a-number, respectively. Any string that exactly matches "+INF" in a case-insensitive way is mapped to positive infinity; the same case-insensitive rule applies to "INF" and "NaN". When casting from numeric tensors to string tensors, plain floating-point representation (such as "314.15926") is used. Converting a non-numerical-literal string such as "Hello World!" is undefined behavior, as is converting a string representing a floating-point value, such as "2.718", to INT.

Conversion from a numerical type to any numerical type is always allowed. The user must be aware of precision loss and value changes caused by the range difference between the two types. For example, a 64-bit float 3.1415926459 may be rounded to a 32-bit float 3.141592. Similarly, converting an integer 36 to boolean may produce 1 because bits that cannot be stored in the targeted type are truncated.
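
For instance, a tiny Cast graph can be built with the OnnxCast class documented here and run with onnxruntime. This is a hedged sketch following the usual skl2onnx.algebra pattern; exact keyword support and runtime behaviour may vary with the installed skl2onnx and onnxruntime versions:

    import numpy as np
    from onnx import TensorProto
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxCast

    x = np.array([[1.9, -2.5], [3.0, 4.7]], dtype=np.float32)

    # Build a one-node graph that casts the float32 input 'X' to int64.
    node = OnnxCast('X', to=TensorProto.INT64, output_names=['Y'])
    model = node.to_onnx({'X': x})

    sess = InferenceSession(model.SerializeToString())
    print(sess.run(None, {'X': x})[0])  # e.g. [[ 1 -2] [ 3  4]]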

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor to be cast.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor with the same shape as input with type specified by the ‘to’ argument

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain input types. Casting from complex is not supported.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain output types. Casting to complex is not supported.

  • -
-
- -
-
-
-
-

OnnxCastLike#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCastLike(*args, **kwargs)#
-

Version

-

Onnx name: CastLike

-

This version of the operator has been available since -version 15.

-

Summary

-

The operator casts the elements of a given input tensor (the first input) to the same data type as the elements of the second input tensor. See documentation of the Cast operator for further details.
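
In NumPy terms, CastLike amounts to casting the first input to the element type of the second; a minimal sketch (the helper cast_like is hypothetical):

    import numpy as np

    def cast_like(x, target):
        """Cast x to the element type of target, mirroring CastLike's semantics."""
        return x.astype(target.dtype)

    x = np.array([1.7, -2.2], dtype=np.float32)
    target = np.array([0], dtype=np.int64)
    print(cast_like(x, target))  # [ 1 -2]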

-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor to be cast.

  • -
  • target_type (heterogeneous)T2: The (first) input tensor will be cast to produce a tensor of the same type as this (second input) tensor.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor produced by casting the first input tensor to have the same type as the second input tensor.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain input types. Casting from complex is not supported.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain output types. Casting to complex is not supported.

  • -
-
- -
-
-
-
-

OnnxCastLike_15#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCastLike_15(*args, **kwargs)#
-

Version

-

Onnx name: CastLike

-

This version of the operator has been available since -version 15.

-

Summary

-

The operator casts the elements of a given input tensor (the first input) to the same data type as the elements of the second input tensor. See documentation of the Cast operator for further details.

-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor to be cast.

  • -
  • target_type (heterogeneous)T2: The (first) input tensor will be cast to produce a tensor of the same type as this (second input) tensor.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor produced by casting the first input tensor to have the same type as the second input tensor.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain input types. Casting from complex is not supported.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain output types. Casting to complex is not supported.

  • -
-
- -
-
-
-
-

OnnxCastMap#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCastMap(*args, **kwargs)#
-

Version

-

Onnx name: CastMap

-

This version of the operator has been available since -version 1 of domain ai.onnx.ml.

-

Summary

-

Converts a map to a tensor. The map key must be an int64 and the values will be ordered in ascending order based on this key. The operator supports dense packing or sparse packing. If using sparse packing, the key cannot exceed the max_map-1 value.
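
A small sketch of the dense vs. sparse packing described above, assuming a plain Python dict as the map (the helper cast_map is illustrative only, not the operator implementation):

    import numpy as np

    def cast_map(m, map_form="DENSE", max_map=1, cast_to=np.float32):
        """Pack an int64->float map into a 1-D tensor, as CastMap describes."""
        keys = sorted(m)
        if map_form == "DENSE":
            # Output only the values, ordered by ascending key.
            return np.array([m[k] for k in keys], dtype=cast_to)
        # SPARSE: place each value at the index given by its key; length is max_map.
        out = np.zeros(max_map, dtype=cast_to)
        for k in keys:
            out[k] = m[k]
        return out

    m = {0: 1.0, 3: 2.5}
    print(cast_map(m))                                # [1.  2.5]
    print(cast_map(m, map_form="SPARSE", max_map=5))  # [1.  0.  0.  2.5 0. ]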

-

Attributes

-
    -
  • cast_to: A string indicating the desired element type of the output tensor, one of ‘TO_FLOAT’, ‘TO_STRING’, ‘TO_INT64’. Default value is -name: "cast_to" s: "TO_FLOAT" type: STRING

  • -
  • map_form: Indicates whether to only output as many values as are in the input (dense), or position the input based on using the key of the map as the index of the output (sparse).<br>One of ‘DENSE’, ‘SPARSE’. Default value is -name: "map_form" s: "DENSE" type: STRING

  • -
  • max_map: If the value of map_form is ‘SPARSE,’ this attribute indicates the total length of the output tensor. Default value is -name: "max_map" i: 1 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: The input map that is to be cast to a tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: A tensor representing the same data as the input map, ordered by their keys

  • -
-

Type Constraints

-
    -
  • T1 map(int64, string), map(int64, float): The input must be an integer map to either string or float.

  • -
  • T2 tensor(string), tensor(float), tensor(int64): The output is a 1-D tensor of string, float, or integer.

  • -
-
- -
-
-
-
-

OnnxCastMap_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCastMap_1(*args, **kwargs)#
-

Version

-

Onnx name: CastMap

-

This version of the operator has been available since -version 1 of domain ai.onnx.ml.

-

Summary

-

Converts a map to a tensor. The map key must be an int64 and the values will be ordered in ascending order based on this key. The operator supports dense packing or sparse packing. If using sparse packing, the key cannot exceed the max_map-1 value.

-

Attributes

-
    -
  • cast_to: A string indicating the desired element type of the output tensor, one of ‘TO_FLOAT’, ‘TO_STRING’, ‘TO_INT64’. Default value is -name: "cast_to" s: "TO_FLOAT" type: STRING

  • -
  • map_form: Indicates whether to only output as many values as are in the input (dense), or position the input based on using the key of the map as the index of the output (sparse).<br>One of ‘DENSE’, ‘SPARSE’. Default value is -name: "map_form" s: "DENSE" type: STRING

  • -
  • max_map: If the value of map_form is ‘SPARSE,’ this attribute indicates the total length of the output tensor. Default value is -name: "max_map" i: 1 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: The input map that is to be cast to a tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: A tensor representing the same data as the input map, ordered by their keys

  • -
-

Type Constraints

-
    -
  • T1 map(int64, string), map(int64, float): The input must be an integer map to either string or float.

  • -
  • T2 tensor(string), tensor(float), tensor(int64): The output is a 1-D tensor of string, float, or integer.

  • -
-
- -
-
-
-
-

OnnxCast_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCast_1(*args, **kwargs)#
-

Version

-

Onnx name: Cast

-

This version of the operator has been available since -version 1.

-

Summary

-

The operator casts the elements of a given input tensor to a data type specified by the 'to' argument and returns an output tensor of the same size in the converted type. The 'to' argument must be one of the data types specified in the 'DataType' enum field in the TensorProto message. NOTE: Casting to and from strings is not supported yet.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor to be cast.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor with the same shape as input with type specified by the ‘to’ argument

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain input types. Casting from strings and complex are not supported.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain output types. Casting to strings and complex are not supported.

  • -
-
- -
-
-
-
-

OnnxCast_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCast_13(*args, **kwargs)#
-

Version

-

Onnx name: Cast

-

This version of the operator has been available since -version 13.

-

Summary

-

The operator casts the elements of a given input tensor to a data type -specified by the ‘to’ argument and returns an output tensor of the same size in -the converted type. The ‘to’ argument must be one of the data types specified -in the ‘DataType’ enum field in the TensorProto message.

-

Casting from string tensor in plain (e.g., “3.14” and “1000”) and scientific numeric representations -(e.g., “1e-5” and “1E8”) to float types is supported. For example, converting string “100.5” to an integer may -result 100. There are some string literals reserved for special floating-point values; -“+INF” (and “INF”), “-INF”, and “NaN” are positive infinity, negative infinity, and not-a-number, respectively. -Any string which can exactly match “+INF” in a case-insensitive way would be mapped to positive infinite. Similarly, -this case-insensitive rule is applied to “INF” and “NaN”. When casting from numeric tensors -to string tensors, plain floating-point representation (such as “314.15926”) would be used. -Converting non-numerical-literal string such as “Hello World!” is an undefined behavior. Cases -of converting string representing floating-point arithmetic value, such as “2.718”, to INT is an undefined behavior.

-

Conversion from any numerical type to any other numerical type is always allowed. Users must be aware of precision loss and value changes caused by the range difference between the two types. For example, a 64-bit float 3.1415926459 may be rounded to a 32-bit float 3.141592. Similarly, converting an integer 36 to Boolean may produce 1 because bits that cannot be stored in the targeted type are truncated.
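As a minimal usage sketch (assuming numpy, onnx and onnxruntime are available, and following the usual skl2onnx algebra pattern of passing ONNX attributes as keyword arguments), the snippet below casts an int64 tensor to float32:

    import numpy as np
    from onnx import TensorProto
    import onnxruntime as rt
    from skl2onnx.algebra.onnx_ops import OnnxCast

    x = np.array([[1, 2], [3, 4]], dtype=np.int64)

    # 'to' takes a value of the TensorProto.DataType enum.
    node = OnnxCast('X', to=TensorProto.FLOAT, op_version=13, output_names=['Y'])
    model = node.to_onnx({'X': x})

    sess = rt.InferenceSession(model.SerializeToString(),
                               providers=["CPUExecutionProvider"])
    print(sess.run(None, {'X': x})[0].dtype)  # float32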

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor to be cast.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor with the same shape as input with type specified by the ‘to’ argument

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain input types. Casting from complex is not supported.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain output types. Casting to complex is not supported.

  • -
-
- -
-
-
-
-

OnnxCast_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCast_6(*args, **kwargs)#
-

Version

-

Onnx name: Cast

-

This version of the operator has been available since -version 6.

-

Summary

-

The operator casts the elements of a given input tensor to a data type -specified by the ‘to’ argument and returns an output tensor of the same size in -the converted type. The ‘to’ argument must be one of the data types specified -in the ‘DataType’ enum field in the TensorProto message. -NOTE: Casting to and from strings is not supported yet.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor to be cast.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor with the same shape as input with type specified by the ‘to’ argument

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain input types. Casting from strings and complex are not supported.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain output types. Casting to strings and complex are not supported.

  • -
-
- -
-
-
-
-

OnnxCast_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCast_9(*args, **kwargs)#
-

Version

-

Onnx name: Cast

-

This version of the operator has been available since -version 9.

-

Summary

-

The operator casts the elements of a given input tensor to a data type -specified by the ‘to’ argument and returns an output tensor of the same size in -the converted type. The ‘to’ argument must be one of the data types specified -in the ‘DataType’ enum field in the TensorProto message.

-

Casting from string tensor in plain (e.g., “3.14” and “1000”) and scientific numeric representations -(e.g., “1e-5” and “1E8”) to float types is supported. For example, converting string “100.5” to an integer may -result 100. There are some string literals reserved for special floating-point values; -“+INF” (and “INF”), “-INF”, and “NaN” are positive infinity, negative infinity, and not-a-number, respectively. -Any string which can exactly match “+INF” in a case-insensitive way would be mapped to positive infinite. Similarly, -this case-insensitive rule is applied to “INF” and “NaN”. When casting from numeric tensors -to string tensors, plain floating-point representation (such as “314.15926”) would be used. -Converting non-numerical-literal string such as “Hello World!” is an undefined behavior. Cases -of converting string representing floating-point arithmetic value, such as “2.718”, to INT is an undefined behavior.

-

Conversion from a numerical type to any numerical type is always allowed. -User must be aware of precision loss and value change caused by range difference between two types. -For example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting -an integer 36 to Boolean may produce 1 because we truncate bits which can’t be stored in the targeted type.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor to be cast.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor with the same shape as input with type specified by the ‘to’ argument

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string): Constrain input types. Casting from complex is not supported.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string): Constrain output types. Casting to complex is not supported.

  • -
-
- -
-
-
-
-

OnnxCategoryMapper#

class skl2onnx.algebra.onnx_ops.OnnxCategoryMapper(*args, **kwargs)#

Version

-

Onnx name: CategoryMapper

-

This version of the operator has been available since version 1 of domain ai.onnx.ml.

-

Summary

-

Converts strings to integers and vice versa.

-

Two sequences of equal length are used to map between integers and strings, with strings and integers at the same index detailing the mapping.

-

Each operator converts either integers to strings or strings to integers, depending on which default value attribute is provided. Only one default value attribute should be defined.

-

If the string default value is set, it will convert integers to strings. If the int default value is set, it will convert strings to integers.
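The lookup itself behaves like a dictionary with a fallback. The sketch below illustrates the string-to-integer direction in plain Python; cats_strings, cats_int64s and default_int64 are the sequence and default attribute names from the ONNX CategoryMapper specification, not a call to the operator:

    cats_strings = ["cat", "dog", "bird"]   # the two equal-length sequences
    cats_int64s = [0, 1, 2]
    default_int64 = -1                      # used when a string is not found

    mapping = dict(zip(cats_strings, cats_int64s))
    inputs = ["dog", "bird", "fish"]
    outputs = [mapping.get(s, default_int64) for s in inputs]
    print(outputs)  # [1, 2, -1]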

-

Attributes

-
    -
  • -
  • -
  • default_int64: An integer to use when an input string value is not found in the map. One and only one of the ‘default_*’ attributes must be defined. Default value is -1.

  • default_string: A string to use when an input integer value is not found in the map. One and only one of the ‘default_*’ attributes must be defined. Default value is "_Unused".

  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: Input data

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: Output data. If strings are input, the output values are integers, and vice versa.

  • -
-

Type Constraints

-
    -
  • T1 tensor(string), tensor(int64): The input must be a tensor of strings or integers, either [N,C] or [C].

  • -
  • T2 tensor(string), tensor(int64): The output is a tensor of strings or integers. Its shape will be the same as the input shape.

  • -
-
- -
-
-
-
-

OnnxCategoryMapper_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCategoryMapper_1(*args, **kwargs)#
-

Version

-

Onnx name: CategoryMapper

-

This version of the operator has been available since -version 1 of domain ai.onnx.ml.

-

Summary

-

Converts strings to integers and vice versa.

-

Two sequences of equal length are used to map between integers and strings, -with strings and integers at the same index detailing the mapping.

-

Each operator converts either integers to strings or strings to integers, depending -on which default value attribute is provided. Only one default value attribute -should be defined.

-

If the string default value is set, it will convert integers to strings. -If the int default value is set, it will convert strings to integers.

-

Attributes

-
    -
  • -
  • -
  • default_int64: An integer to use when an input string value is not found in the map.<br>One and only one of the ‘default_*’ attributes must be defined. Default value is -name: "default_int64" i: -1 type: INT

  • -
  • default_string: A string to use when an input integer value is not found in the map.<br>One and only one of the ‘default_*’ attributes must be defined. Default value is -name: "default_string" s: "_Unused" type: STRING

  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: Input data

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: Output data. If strings are input, the output values are integers, and vice versa.

  • -
-

Type Constraints

-
    -
  • T1 tensor(string), tensor(int64): The input must be a tensor of strings or integers, either [N,C] or [C].

  • -
  • T2 tensor(string), tensor(int64): The output is a tensor of strings or integers. Its shape will be the same as the input shape.

  • -
-
- -
-
-
-
-

OnnxCeil#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCeil(*args, **kwargs)#
-

Version

-

Onnx name: Ceil

-

This version of the operator has been available since version 13.

-

Summary

-

Ceil takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the ceil function, y = ceil(x), is applied to the tensor elementwise.
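For reference, numpy.ceil shows the same elementwise behaviour on a float tensor:

    import numpy as np

    x = np.array([-1.5, 0.2, 2.0], dtype=np.float32)
    y = np.ceil(x)          # rounds each element up to the nearest integer value
    print(y)                # [-1.  1.  2.]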

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxCeil_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCeil_1(*args, **kwargs)#
-

Version

-

Onnx name: Ceil

-

This version of the operator has been available since -version 1.

-

Summary

-

Ceil takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the ceil is, y = ceil(x), is applied to -the tensor elementwise.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxCeil_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCeil_13(*args, **kwargs)#
-

Version

-

Onnx name: Ceil

-

This version of the operator has been available since -version 13.

-

Summary

-

Ceil takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the ceil is, y = ceil(x), is applied to -the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxCeil_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCeil_6(*args, **kwargs)#
-

Version

-

Onnx name: Ceil

-

This version of the operator has been available since -version 6.

-

Summary

-

Ceil takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the ceil is, y = ceil(x), is applied to -the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxCelu#

class skl2onnx.algebra.onnx_ops.OnnxCelu(*args, **kwargs)#

Version

-

Onnx name: Celu

-

This version of the operator has been available since version 12.

-

Summary

-

Continuously Differentiable Exponential Linear Units: perform the linear unit element-wise on the input tensor X using the formula:

max(0,x) + min(0,alpha*(exp(x/alpha)-1))
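A direct numpy transcription of that formula (a reference sketch, not the operator itself):

    import numpy as np

    def celu(x, alpha=1.0):
        # max(0, x) + min(0, alpha * (exp(x / alpha) - 1)), applied elementwise
        return np.maximum(0.0, x) + np.minimum(0.0, alpha * (np.exp(x / alpha) - 1.0))

    x = np.array([-2.0, -0.5, 0.0, 1.5], dtype=np.float32)
    print(celu(x))   # negative values saturate smoothly, positive values pass through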

Attributes

-
    -
  • alpha: The Alpha value in the Celu formula which controls the shape of the unit. Default value is 1.0.

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float): Constrain input and output types to float32 tensors.

  • -
-
- -
-
-
-
-

OnnxCelu_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCelu_12(*args, **kwargs)#
-

Version

-

Onnx name: Celu

-

This version of the operator has been available since -version 12.

-

Summary

-

Continuously Differentiable Exponential Linear Units: -Perform the linear unit element-wise on the input tensor X -using formula:

-
max(0,x) + min(0,alpha*(exp(x/alpha)-1))
-
-
-

Attributes

-
    -
  • alpha: The Alpha value in Celu formula which control the shape of the unit. The default value is 1.0. Default value is -name: "alpha" f: 1.0 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float): Constrain input and output types to float32 tensors.

  • -
-
- -
-
-
-
-

OnnxClip#

class skl2onnx.algebra.onnx_ops.OnnxClip(*args, **kwargs)#

Version

-

Onnx name: Clip

-

This version of the operator has been available since version 13.

-

Summary

-

Clip operator limits the given input within an interval. The interval is specified by the inputs ‘min’ and ‘max’. They default to numeric_limits::lowest() and numeric_limits::max(), respectively.
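numpy.clip mirrors this behaviour; note that since opset 11 ‘min’ and ‘max’ are optional inputs rather than attributes:

    import numpy as np

    x = np.array([-3.0, 0.5, 7.0], dtype=np.float32)
    min_v = np.float32(0.0)    # would be the optional 'min' input of the Clip node
    max_v = np.float32(5.0)    # would be the optional 'max' input of the Clip node
    print(np.clip(x, min_v, max_v))   # [0.  0.5 5. ]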

-

Inputs

-

Between 1 and 3 inputs.

-
    -
  • input (heterogeneous)T: Input tensor whose elements to be clipped

  • -
  • min (optional, heterogeneous)T: Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape).

  • -
  • max (optional, heterogeneous)T: Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape).

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor with clipped input elements

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxClip_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxClip_1(*args, **kwargs)#
-

Version

-

Onnx name: Clip

-

This version of the operator has been available since -version 1.

-

Summary

-

Clip operator limits the given input within an interval. The interval is -specified with arguments ‘min’ and ‘max’. They default to -numeric_limits::lowest() and numeric_limits::max() respectively.

-

Attributes

-
    -
  • -
  • -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor whose elements to be clipped

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor with clipped input elements

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxClip_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxClip_11(*args, **kwargs)#
-

Version

-

Onnx name: Clip

-

This version of the operator has been available since -version 11.

-

Summary

-

Clip operator limits the given input within an interval. The interval is -specified by the inputs ‘min’ and ‘max’. They default to -numeric_limits::lowest() and numeric_limits::max(), respectively.

-

Inputs

-

Between 1 and 3 inputs.

-
    -
  • input (heterogeneous)T: Input tensor whose elements to be clipped

  • -
  • min (optional, heterogeneous)T: Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape).

  • -
  • max (optional, heterogeneous)T: Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape).

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor with clipped input elements

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxClip_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxClip_12(*args, **kwargs)#
-

Version

-

Onnx name: Clip

-

This version of the operator has been available since -version 12.

-

Summary

-

Clip operator limits the given input within an interval. The interval is -specified by the inputs ‘min’ and ‘max’. They default to -numeric_limits::lowest() and numeric_limits::max(), respectively.

-

Inputs

-

Between 1 and 3 inputs.

-
    -
  • input (heterogeneous)T: Input tensor whose elements to be clipped

  • -
  • min (optional, heterogeneous)T: Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape).

  • -
  • max (optional, heterogeneous)T: Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape).

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor with clipped input elements

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxClip_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxClip_13(*args, **kwargs)#
-

Version

-

Onnx name: Clip

-

This version of the operator has been available since -version 13.

-

Summary

-

Clip operator limits the given input within an interval. The interval is -specified by the inputs ‘min’ and ‘max’. They default to -numeric_limits::lowest() and numeric_limits::max(), respectively.

-

Inputs

-

Between 1 and 3 inputs.

-
    -
  • input (heterogeneous)T: Input tensor whose elements to be clipped

  • -
  • min (optional, heterogeneous)T: Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape).

  • -
  • max (optional, heterogeneous)T: Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape).

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor with clipped input elements

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxClip_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxClip_6(*args, **kwargs)#
-

Version

-

Onnx name: Clip

-

This version of the operator has been available since -version 6.

-

Summary

-

Clip operator limits the given input within an interval. The interval is -specified with arguments ‘min’ and ‘max’. They default to -numeric_limits::lowest() and numeric_limits::max() respectively.

-

Attributes

-
    -
  • max: Maximum value, above which element is replaced by max Default value is -name: "max" f: 3.4028234663852886e+38 type: FLOAT

  • -
  • min: Minimum value, under which element is replaced by min Default value is -name: "min" f: -3.4028234663852886e+38 type: FLOAT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor whose elements to be clipped

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor with clipped input elements

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxCompress#

class skl2onnx.algebra.onnx_ops.OnnxCompress(*args, **kwargs)#

Version

-

Onnx name: Compress

-

This version of the operator has been available since version 11.

-

Summary

-

Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index. In case axis is not provided, input is flattened before elements are selected. Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html
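The numpy call referenced above gives the same result:

    import numpy as np

    x = np.array([[1, 2], [3, 4], [5, 6]])
    condition = np.array([False, True, True])

    # Keep only the rows (axis=0) where condition is True.
    print(np.compress(condition, x, axis=0))    # [[3 4], [5 6]]

    # Without an axis the input is flattened first.
    print(np.compress([True, False, True], x))  # [1 3]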

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • condition (heterogeneous)T1: Rank 1 tensor of booleans to indicate which slices or data elements to be selected. Its length can be less than the input length along the axis or the flattened input size if axis is not specified. In such cases data slices or elements exceeding the condition length are discarded.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank r if axis is specified. Otherwise output is a Tensor of rank 1.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
  • T1 tensor(bool): Constrains to boolean tensors.

  • -
-
- -
-
-
-
-

OnnxCompress_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCompress_11(*args, **kwargs)#
-

Version

-

Onnx name: Compress

-

This version of the operator has been available since -version 11.

-

Summary

-

Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index. -In case axis is not provided, input is flattened before elements are selected. -Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • condition (heterogeneous)T1: Rank 1 tensor of booleans to indicate which slices or data elements to be selected. Its length can be less than the input length along the axis or the flattened input size if axis is not specified. In such cases data slices or elements exceeding the condition length are discarded.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank r if axis is specified. Otherwise output is a Tensor of rank 1.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
  • T1 tensor(bool): Constrains to boolean tensors.

  • -
-
- -
-
-
-
-

OnnxCompress_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxCompress_9(*args, **kwargs)#
-

Version

-

Onnx name: Compress

-

This version of the operator has been available since -version 9.

-

Summary

-

Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index. -In case axis is not provided, input is flattened before elements are selected. -Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • condition (heterogeneous)T1: Rank 1 tensor of booleans to indicate which slices or data elements to be selected. Its length can be less than the input length along the axis or the flattened input size if axis is not specified. In such cases data slices or elements exceeding the condition length are discarded.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank r if axis is specified. Otherwise output is a Tensor of rank 1.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
  • T1 tensor(bool): Constrains to boolean tensors.

  • -
-
- -
-
-
-
-

OnnxConcat#

class skl2onnx.algebra.onnx_ops.OnnxConcat(*args, **kwargs)#

Version

-

Onnx name: Concat

-

This version of the operator has been available since version 13.

-

Summary

-

Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on.
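For reference, the numpy equivalent of concatenating along axis 1 (only the concatenation axis may differ in size):

    import numpy as np

    a = np.array([[1, 2], [3, 4]], dtype=np.float32)   # shape (2, 2)
    b = np.array([[5], [6]], dtype=np.float32)         # shape (2, 1): only axis 1 differs
    print(np.concatenate([a, b], axis=1))              # shape (2, 3)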

-

Attributes

-
    -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • inputs (variadic, heterogeneous)T: List of tensors for concatenation

  • -
-

Outputs

-
    -
  • concat_result (heterogeneous)T: Concatenated tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain output types to any tensor type.

  • -
-
- -
-
-
-
-

OnnxConcatFromSequence#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConcatFromSequence(*args, **kwargs)#
-

Version

-

Onnx name: ConcatFromSequence

-

This version of the operator has been available since version 11.

-

Summary

-

Concatenate a sequence of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on. By default ‘new_axis’ is 0 and the behavior is similar to numpy.concatenate. When ‘new_axis’ is 1, the behavior is similar to numpy.stack.
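The new_axis switch corresponds to the difference between numpy.concatenate and numpy.stack:

    import numpy as np

    seq = [np.ones((2, 3)), np.zeros((2, 3))]

    print(np.concatenate(seq, axis=0).shape)  # (4, 3)    -> new_axis = 0
    print(np.stack(seq, axis=0).shape)        # (2, 2, 3) -> new_axis = 1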

-

Attributes

-
    -
  • -
  • new_axis: Insert and concatenate on a new axis or not, default 0 means do not insert new axis. Default value is -name: "new_axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • input_sequence (heterogeneous)S: Sequence of tensors for concatenation

  • -
-

Outputs

-
    -
  • concat_result (heterogeneous)T: Concatenated tensor

  • -
-

Type Constraints

-
    -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input types to any tensor type.

  • -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain output types to any tensor type.

  • -
-
- -
-
-
-
-

OnnxConcatFromSequence_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConcatFromSequence_11(*args, **kwargs)#
-

Version

-

Onnx name: ConcatFromSequence

-

This version of the operator has been available since -version 11.

-

Summary

-

Concatenate a sequence of tensors into a single tensor. -All input tensors must have the same shape, except for the dimension size of the axis to concatenate on. -By default ‘new_axis’ is 0, the behavior is similar to numpy.concatenate. -When ‘new_axis’ is 1, the behavior is similar to numpy.stack.

-

Attributes

-
    -
  • -
  • new_axis: Insert and concatenate on a new axis or not, default 0 means do not insert new axis. Default value is -name: "new_axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • input_sequence (heterogeneous)S: Sequence of tensors for concatenation

  • -
-

Outputs

-
    -
  • concat_result (heterogeneous)T: Concatenated tensor

  • -
-

Type Constraints

-
    -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input types to any tensor type.

  • -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain output types to any tensor type.

  • -
-
- -
-
-
-
-

OnnxConcat_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConcat_1(*args, **kwargs)#
-

Version

-

Onnx name: Concat

-

This version of the operator has been available since -version 1.

-

Summary

-

Concatenate a list of tensors into a single tensor

-

Attributes

-
    -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • inputs (variadic, heterogeneous)T: List of tensors for concatenation

  • -
-

Outputs

-
    -
  • concat_result (heterogeneous)T: Concatenated tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxConcat_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConcat_11(*args, **kwargs)#
-

Version

-

Onnx name: Concat

-

This version of the operator has been available since -version 11.

-

Summary

-

Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on.

-

Attributes

-
    -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • inputs (variadic, heterogeneous)T: List of tensors for concatenation

  • -
-

Outputs

-
    -
  • concat_result (heterogeneous)T: Concatenated tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain output types to any tensor type.

  • -
-
- -
-
-
-
-

OnnxConcat_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConcat_13(*args, **kwargs)#
-

Version

-

Onnx name: Concat

-

This version of the operator has been available since -version 13.

-

Summary

-

Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on.

-

Attributes

-
    -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • inputs (variadic, heterogeneous)T: List of tensors for concatenation

  • -
-

Outputs

-
    -
  • concat_result (heterogeneous)T: Concatenated tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain output types to any tensor type.

  • -
-
- -
-
-
-
-

OnnxConcat_4#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConcat_4(*args, **kwargs)#
-

Version

-

Onnx name: Concat

-

This version of the operator has been available since -version 4.

-

Summary

-

Concatenate a list of tensors into a single tensor

-

Attributes

-
    -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • inputs (variadic, heterogeneous)T: List of tensors for concatenation

  • -
-

Outputs

-
    -
  • concat_result (heterogeneous)T: Concatenated tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain output types to any tensor type.

  • -
-
- -
-
-
-
-

OnnxConstant#

class skl2onnx.algebra.onnx_ops.OnnxConstant(*args, **kwargs)#

Version

-

Onnx name: Constant

-

This version of the operator has been available since version 13.

-

Summary

-

This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value, or value_*, must be specified.
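For illustration, the onnx helper API can build such a node directly; the sketch below fills the ‘value’ attribute (one of the mutually exclusive attributes) with a small float tensor:

    import numpy as np
    from onnx import helper, numpy_helper

    const_node = helper.make_node(
        "Constant",
        inputs=[],
        outputs=["c"],
        # exactly one of value / sparse_value / value_* may be given
        value=numpy_helper.from_array(np.array([1.0, 2.0], dtype=np.float32), name="c"),
    )
    print(const_node)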

-

Attributes

-
    -
  • -
  • -
  • -
  • -
  • -
  • -
  • -
  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor containing the same value of the provided tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxConstantOfShape#

class skl2onnx.algebra.onnx_ops.OnnxConstantOfShape(*args, **kwargs)#

Version

-

Onnx name: ConstantOfShape

-

This version of the operator has been available since version 9.

-

Summary

-

Generate a tensor with given value and shape.
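The behaviour corresponds to numpy.full: the 1-D ‘input’ gives the shape and the optional ‘value’ attribute gives the fill value (0.0 as float32 when omitted):

    import numpy as np

    shape = np.array([2, 3], dtype=np.int64)           # the 1-D 'input' tensor
    value = np.float32(0.0)                            # default when 'value' is absent
    print(np.full(tuple(shape), value, dtype=np.float32))  # 2x3 tensor of zeros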

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: 1D tensor. The shape of the expected output tensor. If empty tensor is given, the output would be a scalar. All values must be >= 0.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor of shape specified by ‘input’. If attribute ‘value’ is specified, the value and datatype of the output tensor are taken from ‘value’. If attribute ‘value’ is not specified, the value in the output defaults to 0, and the datatype defaults to float32.

  • -
-

Type Constraints

-
    -
  • T1 tensor(int64): Constrain input types.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain output types to be numerics.

  • -
-
- -
-
-
-
-

OnnxConstantOfShape_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConstantOfShape_9(*args, **kwargs)#
-

Version

-

Onnx name: ConstantOfShape

-

This version of the operator has been available since -version 9.

-

Summary

-

Generate a tensor with given value and shape.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: 1D tensor. The shape of the expected output tensor. If empty tensor is given, the output would be a scalar. All values must be >= 0.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor of shape specified by ‘input’.If attribute ‘value’ is specified, the value and datatype of the output tensor is taken from ‘value’.If attribute ‘value’ is not specified, the value in the output defaults to 0, and the datatype defaults to float32.

  • -
-

Type Constraints

-
    -
  • T1 tensor(int64): Constrain input types.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain output types to be numerics.

  • -
-
- -
-
-
-
-

OnnxConstant_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConstant_1(*args, **kwargs)#
-

Version

-

Onnx name: Constant

-

This version of the operator has been available since -version 1.

-

Summary

-

A constant tensor.

-

Attributes

-
    -
  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor containing the same value of the provided tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxConstant_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConstant_11(*args, **kwargs)#
-

Version

-

Onnx name: Constant

-

This version of the operator has been available since -version 11.

-

Summary

-

A constant tensor. Exactly one of the two attributes, either value or sparse_value, -must be specified.

-

Attributes

-
    -
  • -
  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor containing the same value of the provided tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxConstant_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConstant_12(*args, **kwargs)#
-

Version

-

Onnx name: Constant

-

This version of the operator has been available since -version 12.

-

Summary

-

This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value, -or value_* must be specified.

-

Attributes

-
    -
  • -
  • -
  • -
  • -
  • -
  • -
  • -
  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor containing the same value of the provided tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxConstant_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConstant_13(*args, **kwargs)#
-

Version

-

Onnx name: Constant

-

This version of the operator has been available since -version 13.

-

Summary

-

This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value, -or value_* must be specified.

-

Attributes

-
    -
  • -
  • -
  • -
  • -
  • -
  • -
  • -
  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor containing the same value of the provided tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxConstant_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConstant_9(*args, **kwargs)#
-

Version

-

Onnx name: Constant

-

This version of the operator has been available since -version 9.

-

Summary

-

A constant tensor.

-

Attributes

-
    -
  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor containing the same value of the provided tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxConv#

class skl2onnx.algebra.onnx_ops.OnnxConv(*args, **kwargs)#

Version

-

Onnx name: Conv

-

This version of the operator has been available since version 11.

-

Summary

-

The convolution operator consumes an input tensor and a filter, and computes the output.
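The output spatial sizes follow standard convolution arithmetic; the small helper below (a sketch in terms of the usual kernel_shape/strides/pads/dilations attributes) makes the relationship explicit for explicit padding:

    import math

    def conv_output_size(in_size, kernel, stride=1, pad_begin=0, pad_end=0, dilation=1):
        # effective kernel extent once dilation is applied
        effective_kernel = dilation * (kernel - 1) + 1
        return math.floor((in_size + pad_begin + pad_end - effective_kernel) / stride) + 1

    # 32x32 input, 3x3 kernel, stride 2, padding 1 on both sides -> 16x16 output
    print(conv_output_size(32, kernel=3, stride=2, pad_begin=1, pad_end=1))  # 16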

-

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • -
  • group: number of groups input channels and output channels are divided into. Default value is 1.

  • -
  • -
  • -
  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • X (heterogeneous)T: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
  • W (heterogeneous)T: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. Assuming zero based indices for the shape array, X.shape[1] == (W.shape[1] * group) == C and W.shape[0] mod G == 0. Or in other words FILTER_IN_CHANNEL multiplied by the number of groups should be equal to DATA_CHANNEL and the number of feature maps M should be a multiple of the number of groups G.

  • -
  • B (optional, heterogeneous)T: Optional 1D bias to be added to the convolution, has size of M.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxConvInteger#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConvInteger(*args, **kwargs)#
-

Version

-

Onnx name: ConvInteger

-

This version of the operator has been available since version 10.

-

Summary

-

The integer convolution operator consumes an input tensor, its zero-point, a filter, and its zero-point, and computes the output. The individual products MUST never overflow; only the accumulation, which is carried out in 32 bits, may overflow.
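The zero points shift the 8-bit inputs before the multiply-accumulate, which is performed in int32 (see the T3 constraint below); a one-position sketch of that arithmetic:

    import numpy as np

    x = np.array([[10, 20], [30, 40]], dtype=np.uint8)   # one 2x2 input patch
    w = np.array([[1, 2], [3, 4]], dtype=np.uint8)       # one 2x2 kernel
    x_zero_point = np.uint8(5)
    w_zero_point = np.uint8(1)

    # Shift by the zero points and accumulate in int32.
    acc = np.sum(
        (x.astype(np.int32) - int(x_zero_point)) * (w.astype(np.int32) - int(w_zero_point))
    )
    print(acc)  # 170, the int32 output value for this position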

-

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • -
  • group: number of groups input channels and output channels are divided into. default is 1. Default value is -name: "group" i: 1 type: INT

  • -
  • -
  • -
  • -
-

Inputs

-

Between 2 and 4 inputs.

-
    -
  • x (heterogeneous)T1: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
  • w (heterogeneous)T2: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL.

  • -
  • x_zero_point (optional, heterogeneous)T1: Zero point tensor for input ‘x’. It’s optional and default value is 0. It’s a scalar, which means a per-tensor/layer quantization.

  • -
  • w_zero_point (optional, heterogeneous)T2: Zero point tensor for input ‘w’. It’s optional and default value is 0. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it’s a 1-D tensor, its number of elements should be equal to the number of output channels (M)

  • -
-

Outputs

-
    -
  • y (heterogeneous)T3: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

  • -
-

Type Constraints

-
    -
  • T1 tensor(int8), tensor(uint8): Constrain input x and its zero point data type to 8-bit integer tensor.

  • -
  • T2 tensor(int8), tensor(uint8): Constrain input w and its zero point data type to 8-bit integer tensor.

  • -
  • T3 tensor(int32): Constrain output y data type to 32-bit integer tensor.

  • -
-
- -
-
-
-
-

OnnxConvInteger_10#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConvInteger_10(*args, **kwargs)#
-

Version

-

Onnx name: ConvInteger

-

This version of the operator has been available since -version 10.

-

Summary

-

The integer convolution operator consumes an input tensor, its zero-point, a filter, and its zero-point, -and computes the output. The production MUST never overflow. The accumulation may overflow if and only if in 32 bits.

-

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • -
  • group: number of groups input channels and output channels are divided into. default is 1. Default value is -name: "group" i: 1 type: INT

  • -
  • -
  • -
  • -
-

Inputs

-

Between 2 and 4 inputs.

-
    -
  • x (heterogeneous)T1: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
  • w (heterogeneous)T2: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL.

  • -
  • x_zero_point (optional, heterogeneous)T1: Zero point tensor for input ‘x’. It’s optional and default value is 0. It’s a scalar, which means a per-tensor/layer quantization.

  • -
  • w_zero_point (optional, heterogeneous)T2: Zero point tensor for input ‘w’. It’s optional and default value is 0. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it’s a 1-D tensor, its number of elements should be equal to the number of output channels (M)

  • -
-

Outputs

-
    -
  • y (heterogeneous)T3: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

  • -
-

Type Constraints

-
    -
  • T1 tensor(int8), tensor(uint8): Constrain input x and its zero point data type to 8-bit integer tensor.

  • -
  • T2 tensor(int8), tensor(uint8): Constrain input w and its zero point data type to 8-bit integer tensor.

  • -
  • T3 tensor(int32): Constrain output y data type to 32-bit integer tensor.

  • -
-
- -
-
-
-
-

OnnxConvTranspose#

class skl2onnx.algebra.onnx_ops.OnnxConvTranspose(*args, **kwargs)#

Version

-

Onnx name: ConvTranspose

-

This version of the operator has been available since version 11.

-

Summary

-

The convolution transpose operator consumes an input tensor and a filter, and computes the output.

-

If the pads parameter is provided the shape of the output is calculated via the following equation:

-
-

output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i]

-
-

output_shape can also be explicitly specified in which case pads values are auto generated using these equations:

-
-

total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i]
If (auto_pads == SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2)
Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = total_padding[i]/2
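A direct transcription of those equations for a single spatial axis (a hypothetical helper for checking a configuration by hand):

    def conv_transpose_output_size(input_size, kernel, stride=1, dilation=1,
                                   output_padding=0, pad_begin=0, pad_end=0):
        # First equation: output size when explicit pads are provided.
        return (stride * (input_size - 1) + output_padding
                + (kernel - 1) * dilation + 1 - pad_begin - pad_end)

    def pads_for_output_shape(input_size, kernel, output_shape, stride=1,
                              dilation=1, output_padding=0, same_upper=True):
        # Second equation: derive pads when output_shape is given explicitly.
        total = (stride * (input_size - 1) + output_padding
                 + (kernel - 1) * dilation + 1 - output_shape)
        half = total // 2
        return (half, total - half) if same_upper else (total - half, half)

    # upsample 4 -> 8 with a 3-tap kernel, stride 2, symmetric padding 1, output_padding 1
    print(conv_transpose_output_size(4, kernel=3, stride=2,
                                     pad_begin=1, pad_end=1, output_padding=1))  # 8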

-
-

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = input_shape[i] * strides[i] for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • -
  • group: number of groups input channels and output channels are divided into. Default value is -name: "group" i: 1 type: INT

  • -
  • -
  • -
  • -
  • -
  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • X (heterogeneous)T: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn)

  • -
  • W (heterogeneous)T: The weight tensor that will be used in the convolutions; has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x … x kn), where (k1 x k2 x … x kn) is the dimension of the kernel. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)

  • -
  • B (optional, heterogeneous)T: Optional 1D bias to be added to the convolution, has size of M.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, pad lengths and group count. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxConvTranspose_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxConvTranspose_1(*args, **kwargs)#
-

Version

-

Onnx name: ConvTranspose

-

This version of the operator has been available since -version 1.

-

Summary

-

The convolution transpose operator consumes an input tensor and a filter, -and computes the output.

-

If the pads parameter is provided the shape of the output is calculated via the following equation:

-
-

output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i]

-
-

output_shape can also be explicitly specified in which case pads values are auto generated using these equations:

-
-

total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i] -If (auto_pads != SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2) -Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2).

-
-

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • -
  • group: number of groups input channels and output channels are divided into. Default value is -name: "group" i: 1 type: INT

  • -
  • -
  • -
  • -
  • -
  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • X (heterogeneous)T: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn)

  • -
  • W (heterogeneous)T: The weight tensor that will be used in the convolutions; has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x … x kn), where (k1 x k2 x … x kn) is the dimension of the kernel. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)

  • -
  • B (optional, heterogeneous)T: Optional 1D bias to be added to the convolution, has size of M.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, pad lengths and group count. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxConvTranspose_11#

class skl2onnx.algebra.onnx_ops.OnnxConvTranspose_11(*args, **kwargs)#

Version

Onnx name: ConvTranspose

This version of the operator has been available since version 11.

Summary

The convolution transpose operator consumes an input tensor and a filter, and computes the output.

If the pads parameter is provided the shape of the output is calculated via the following equation:

output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i]

output_shape can also be explicitly specified in which case pads values are auto generated using these equations:

total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i]
If (auto_pads == SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2)
Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2).

Attributes

  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = input_shape[i] * strides[i] for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is name: "auto_pad" s: "NOTSET" type: STRING

  • group: number of groups input channels and output channels are divided into. Default value is name: "group" i: 1 type: INT

Inputs

Between 2 and 3 inputs.

  • X (heterogeneous) T: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn)

  • W (heterogeneous) T: The weight tensor that will be used in the convolutions; has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x … x kn), where (k1 x k2 x … x kn) is the dimension of the kernel. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)

  • B (optional, heterogeneous) T: Optional 1D bias to be added to the convolution, has size of M.

Outputs

  • Y (heterogeneous) T: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, pad lengths and group count. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
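For intuition, the two formulas above can be evaluated with a small, self-contained Python sketch (illustrative only, not part of skl2onnx; the function name and argument order are invented for this example):

def convtranspose_shape_or_pads(input_size, kernel_shape, strides, dilations,
                                output_padding, output_shape=None,
                                auto_pad="SAME_UPPER"):
    # Number of spatial axes.
    rank = len(input_size)
    if output_shape is None:
        # First formula, assuming all pads are zero.
        return [strides[i] * (input_size[i] - 1) + output_padding[i]
                + ((kernel_shape[i] - 1) * dilations[i] + 1)
                for i in range(rank)]
    # Second formula: derive pads from an explicitly requested output_shape.
    pads = [0] * (2 * rank)
    for i in range(rank):
        total = (strides[i] * (input_size[i] - 1) + output_padding[i]
                 + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i])
        if auto_pad == "SAME_UPPER":
            pads[i], pads[i + rank] = total // 2, total - total // 2
        else:
            pads[i], pads[i + rank] = total - total // 2, total // 2
    return pads

print(convtranspose_shape_or_pads([3, 3], [3, 3], [2, 2], [1, 1], [0, 0]))          # [7, 7]
print(convtranspose_shape_or_pads([3, 3], [3, 3], [2, 2], [1, 1], [0, 0], [6, 6]))  # [0, 0, 1, 1]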

OnnxConv_1#

class skl2onnx.algebra.onnx_ops.OnnxConv_1(*args, **kwargs)#

Version

Onnx name: Conv

This version of the operator has been available since version 1.

Summary

The convolution operator consumes an input tensor and a filter, and computes the output.

Attributes

  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input. In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is name: "auto_pad" s: "NOTSET" type: STRING

  • group: number of groups input channels and output channels are divided into. Default value is name: "group" i: 1 type: INT

Inputs

Between 2 and 3 inputs.

  • X (heterogeneous) T: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • W (heterogeneous) T: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL.

  • B (optional, heterogeneous) T: Optional 1D bias to be added to the convolution, has size of M.

Outputs

  • Y (heterogeneous) T: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxConv_11#

class skl2onnx.algebra.onnx_ops.OnnxConv_11(*args, **kwargs)#

Version

Onnx name: Conv

This version of the operator has been available since version 11.

Summary

The convolution operator consumes an input tensor and a filter, and computes the output.

Attributes

  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is name: "auto_pad" s: "NOTSET" type: STRING

  • group: number of groups input channels and output channels are divided into. Default value is name: "group" i: 1 type: INT

Inputs

Between 2 and 3 inputs.

  • X (heterogeneous) T: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • W (heterogeneous) T: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. Assuming zero based indices for the shape array, X.shape[1] == (W.shape[1] * group) == C and W.shape[0] mod G == 0. Or in other words FILTER_IN_CHANNEL multiplied by the number of groups should be equal to DATA_CHANNEL and the number of feature maps M should be a multiple of the number of groups G.

  • B (optional, heterogeneous) T: Optional 1D bias to be added to the convolution, has size of M.

Outputs

  • Y (heterogeneous) T: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
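As a rough usage sketch, a single Conv node can be built with this class and evaluated with onnxruntime. This is illustrative only and assumes onnxruntime is installed; exact keyword-argument support may vary between skl2onnx and onnxruntime versions:

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxConv
from onnxruntime import InferenceSession

X = np.random.rand(1, 1, 5, 5).astype(np.float32)   # N x C x H x W
W = np.ones((1, 1, 3, 3), dtype=np.float32)          # M x C/group x kH x kW

# Passing the numpy array W turns it into an initializer of the graph.
node = OnnxConv('X', W, auto_pad='SAME_UPPER', op_version=11,
                output_names=['Y'])
onx = node.to_onnx({'X': X}, target_opset=11)

sess = InferenceSession(onx.SerializeToString())
print(sess.run(None, {'X': X})[0].shape)             # (1, 1, 5, 5) with SAME_UPPER padding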

OnnxCos#

class skl2onnx.algebra.onnx_ops.OnnxCos(*args, **kwargs)#

Version

Onnx name: Cos

This version of the operator has been available since version 7.

Summary

Calculates the cosine of the given input tensor, element-wise.

Inputs

  • input (heterogeneous) T: Input tensor

Outputs

  • output (heterogeneous) T: The cosine of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxCos_7#

class skl2onnx.algebra.onnx_ops.OnnxCos_7(*args, **kwargs)#

Version

Onnx name: Cos

This version of the operator has been available since version 7.

Summary

Calculates the cosine of the given input tensor, element-wise.

Inputs

  • input (heterogeneous) T: Input tensor

Outputs

  • output (heterogeneous) T: The cosine of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxCosh#

class skl2onnx.algebra.onnx_ops.OnnxCosh(*args, **kwargs)#

Version

Onnx name: Cosh

This version of the operator has been available since version 9.

Summary

Calculates the hyperbolic cosine of the given input tensor element-wise.

Inputs

  • input (heterogeneous) T: Input tensor

Outputs

  • output (heterogeneous) T: The hyperbolic cosine values of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxCosh_9#

class skl2onnx.algebra.onnx_ops.OnnxCosh_9(*args, **kwargs)#

Version

Onnx name: Cosh

This version of the operator has been available since version 9.

Summary

Calculates the hyperbolic cosine of the given input tensor element-wise.

Inputs

  • input (heterogeneous) T: Input tensor

Outputs

  • output (heterogeneous) T: The hyperbolic cosine values of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxCumSum#

class skl2onnx.algebra.onnx_ops.OnnxCumSum(*args, **kwargs)#

Version

Onnx name: CumSum

This version of the operator has been available since version 14.

Summary

Performs cumulative sum of the input elements along the given axis. By default, it will do the sum inclusively meaning the first element is copied as is. Through an exclusive attribute, this behavior can change to exclude the first element. It can also perform summation in the opposite direction of the axis. For that, set reverse attribute to 1.

Example:

input_x = [1, 2, 3]
axis=0
output = [1, 3, 6]
exclusive=1
output = [0, 1, 3]
exclusive=0
reverse=1
output = [6, 5, 3]
exclusive=1
reverse=1
output = [5, 3, 0]

Attributes

  • exclusive: If set to 1 will return exclusive sum in which the top element is not included. In other terms, if set to 1, the j-th output element would be the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements. Default value is name: "exclusive" i: 0 type: INT

  • reverse: If set to 1 will perform the sums in reverse direction. Default value is name: "reverse" i: 0 type: INT

Inputs

  • x (heterogeneous) T: An input tensor that is to be processed.

  • axis (heterogeneous) T2: A 0-D tensor. Must be in the range [-rank(x), rank(x)-1]. Negative value means counting dimensions from the back.

Outputs

  • y (heterogeneous) T: Output tensor of the same type as ‘x’ with cumulative sums of the x’s elements

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • T2 tensor(int32), tensor(int64): axis tensor can be int32 or int64 only
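The exclusive/reverse combinations in the example above can be reproduced with plain NumPy, which is a convenient reference before exporting a graph (illustrative only):

import numpy as np

def cumsum_ref(x, axis=0, exclusive=0, reverse=0):
    x = np.asarray(x)
    if reverse:
        x = np.flip(x, axis=axis)
    y = np.cumsum(x, axis=axis)
    if exclusive:
        # Exclusive sum: drop each element's own contribution.
        y = y - x
    if reverse:
        y = np.flip(y, axis=axis)
    return y

print(cumsum_ref([1, 2, 3]))                          # [1 3 6]
print(cumsum_ref([1, 2, 3], exclusive=1))             # [0 1 3]
print(cumsum_ref([1, 2, 3], reverse=1))               # [6 5 3]
print(cumsum_ref([1, 2, 3], exclusive=1, reverse=1))  # [5 3 0]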

OnnxCumSum_11#

class skl2onnx.algebra.onnx_ops.OnnxCumSum_11(*args, **kwargs)#

Version

Onnx name: CumSum

This version of the operator has been available since version 11.

Summary

Performs cumulative sum of the input elements along the given axis. By default, it will do the sum inclusively meaning the first element is copied as is. Through an exclusive attribute, this behavior can change to exclude the first element. It can also perform summation in the opposite direction of the axis. For that, set reverse attribute to 1.

Example:

input_x = [1, 2, 3]
axis=0
output = [1, 3, 6]
exclusive=1
output = [0, 1, 3]
exclusive=0
reverse=1
output = [6, 5, 3]
exclusive=1
reverse=1
output = [5, 3, 0]

Attributes

  • exclusive: If set to 1 will return exclusive sum in which the top element is not included. In other terms, if set to 1, the j-th output element would be the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements. Default value is name: "exclusive" i: 0 type: INT

  • reverse: If set to 1 will perform the sums in reverse direction. Default value is name: "reverse" i: 0 type: INT

Inputs

  • x (heterogeneous) T: An input tensor that is to be processed.

  • axis (heterogeneous) T2: A 0-D tensor. Must be in the range [-rank(x), rank(x)-1]. Negative value means counting dimensions from the back.

Outputs

  • y (heterogeneous) T: Output tensor of the same type as ‘x’ with cumulative sums of the x’s elements

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float), tensor(double): Input can be of any tensor type.

  • T2 tensor(int32), tensor(int64): axis tensor can be int32 or int64 only

OnnxCumSum_14#

class skl2onnx.algebra.onnx_ops.OnnxCumSum_14(*args, **kwargs)#

Version

Onnx name: CumSum

This version of the operator has been available since version 14.

Summary

Performs cumulative sum of the input elements along the given axis. By default, it will do the sum inclusively meaning the first element is copied as is. Through an exclusive attribute, this behavior can change to exclude the first element. It can also perform summation in the opposite direction of the axis. For that, set reverse attribute to 1.

Example:

input_x = [1, 2, 3]
axis=0
output = [1, 3, 6]
exclusive=1
output = [0, 1, 3]
exclusive=0
reverse=1
output = [6, 5, 3]
exclusive=1
reverse=1
output = [5, 3, 0]

Attributes

  • exclusive: If set to 1 will return exclusive sum in which the top element is not included. In other terms, if set to 1, the j-th output element would be the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements. Default value is name: "exclusive" i: 0 type: INT

  • reverse: If set to 1 will perform the sums in reverse direction. Default value is name: "reverse" i: 0 type: INT

Inputs

  • x (heterogeneous) T: An input tensor that is to be processed.

  • axis (heterogeneous) T2: A 0-D tensor. Must be in the range [-rank(x), rank(x)-1]. Negative value means counting dimensions from the back.

Outputs

  • y (heterogeneous) T: Output tensor of the same type as ‘x’ with cumulative sums of the x’s elements

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • T2 tensor(int32), tensor(int64): axis tensor can be int32 or int64 only

OnnxDepthToSpace#

class skl2onnx.algebra.onnx_ops.OnnxDepthToSpace(*args, **kwargs)#

Version

Onnx name: DepthToSpace

This version of the operator has been available since version 13.

Summary

DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. By default, mode = DCR. In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the following order: depth, column, and then row. The output y is computed from the input x as below:

b, c, h, w = x.shape
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])

In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the following order: column, row, and the depth. The output y is computed from the input x as below:

b, c, h, w = x.shape
tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])
tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])
y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])

Attributes

  • mode: DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order. Default value is name: "mode" s: "DCR" type: STRING

Inputs

  • input (heterogeneous) T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

Outputs

  • output (heterogeneous) T: Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize].

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.
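Since the summary already gives the NumPy recipe, the DCR re-arrangement can be sanity-checked directly on a tiny tensor (illustrative only; blocksize is the operator's block-size attribute):

import numpy as np

blocksize = 2
x = np.arange(1 * 8 * 2 * 3).reshape(1, 8, 2, 3).astype(np.float32)

b, c, h, w = x.shape
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])

print(y.shape)   # (1, 2, 4, 6): depth 8 -> 2, height 2 -> 4, width 3 -> 6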

OnnxDepthToSpace_1#

class skl2onnx.algebra.onnx_ops.OnnxDepthToSpace_1(*args, **kwargs)#

Version

Onnx name: DepthToSpace

This version of the operator has been available since version 1.

Summary

DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions.

Attributes

Inputs

  • input (heterogeneous) T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

Outputs

  • output (heterogeneous) T: Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize].

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxDepthToSpace_11#

class skl2onnx.algebra.onnx_ops.OnnxDepthToSpace_11(*args, **kwargs)#

Version

Onnx name: DepthToSpace

This version of the operator has been available since version 11.

Summary

DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. By default, mode = DCR. In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the following order: depth, column, and then row. The output y is computed from the input x as below:

b, c, h, w = x.shape
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])

In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the following order: column, row, and the depth. The output y is computed from the input x as below:

b, c, h, w = x.shape
tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])
tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])
y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])

Attributes

  • mode: DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order. Default value is name: "mode" s: "DCR" type: STRING

Inputs

  • input (heterogeneous) T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

Outputs

  • output (heterogeneous) T: Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize].

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxDepthToSpace_13#

class skl2onnx.algebra.onnx_ops.OnnxDepthToSpace_13(*args, **kwargs)#

Version

Onnx name: DepthToSpace

This version of the operator has been available since version 13.

Summary

DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. By default, mode = DCR. In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the following order: depth, column, and then row. The output y is computed from the input x as below:

b, c, h, w = x.shape
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])

In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the following order: column, row, and the depth. The output y is computed from the input x as below:

b, c, h, w = x.shape
tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])
tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])
y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])

Attributes

  • mode: DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order. Default value is name: "mode" s: "DCR" type: STRING

Inputs

  • input (heterogeneous) T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

Outputs

  • output (heterogeneous) T: Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize].

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxDequantizeLinear#

class skl2onnx.algebra.onnx_ops.OnnxDequantizeLinear(*args, **kwargs)#

Version

Onnx name: DequantizeLinear

This version of the operator has been available since version 13.

Summary

The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor. The dequantization formula is y = (x - x_zero_point) * x_scale. ‘x_scale’ and ‘x_zero_point’ must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. ‘x_zero_point’ and ‘x’ must have same type. ‘x’ and ‘y’ must have same shape. In the case of dequantizing int32, there’s no zero point (zero point is supposed to be 0).

Attributes

  • axis: (Optional) The axis of the dequantizing dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: 1 type: INT

Inputs

Between 2 and 3 inputs.

  • x (heterogeneous) T: N-D quantized input tensor to be de-quantized.

  • x_scale (heterogeneous) tensor(float): Scale for input ‘x’. It can be a scalar, which means a per-tensor/layer dequantization, or a 1-D tensor for per-axis dequantization.

  • x_zero_point (optional, heterogeneous) T: Zero point for input ‘x’. Shape must match x_scale. It’s optional. Zero point is 0 when it’s not specified.

Outputs

  • y (heterogeneous) tensor(float): N-D full precision output tensor. It has same shape as input ‘x’.

Type Constraints

  • T tensor(int8), tensor(uint8), tensor(int32): Constrain ‘x_zero_point’ and ‘x’ to 8-bit/32-bit integer tensor.
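The dequantization formula is simple enough to mirror in NumPy, both per-tensor and per-axis (illustrative sketch only):

import numpy as np

# Per-tensor: scalar scale and zero point.
x = np.array([0, 128, 255], dtype=np.uint8)
x_scale = np.float32(0.1)
x_zero_point = np.uint8(128)
y = (x.astype(np.int32) - int(x_zero_point)) * x_scale
print(y)            # approximately [-12.8   0.   12.7]

# Per-axis (axis=1): one scale / zero point per channel.
x = np.full((1, 2, 2), 10, dtype=np.int8)
scale = np.array([0.5, 2.0], dtype=np.float32).reshape(1, 2, 1)
zp = np.array([0, 2], dtype=np.int8).reshape(1, 2, 1)
y = (x.astype(np.int32) - zp.astype(np.int32)) * scale
print(y[0, :, 0])   # [ 5. 16.]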

OnnxDequantizeLinear_10#

class skl2onnx.algebra.onnx_ops.OnnxDequantizeLinear_10(*args, **kwargs)#

Version

Onnx name: DequantizeLinear

This version of the operator has been available since version 10.

Summary

The linear dequantization operator. It consumes a quantized tensor, a scale, a zero point to compute the full precision tensor. The dequantization formula is y = (x - x_zero_point) * x_scale. ‘x_scale’ and ‘x_zero_point’ are both scalars. ‘x_zero_point’ and ‘x’ must have same type. ‘x’ and ‘y’ must have same shape. In the case of dequantizing int32, there’s no zero point (zero point is supposed to be 0).

Inputs

Between 2 and 3 inputs.

  • x (heterogeneous) T: N-D quantized input tensor to be de-quantized.

  • x_scale (heterogeneous) tensor(float): Scale for input ‘x’. It’s a scalar, which means a per-tensor/layer quantization.

  • x_zero_point (optional, heterogeneous) T: Zero point for input ‘x’. It’s a scalar, which means a per-tensor/layer quantization. It’s optional. 0 is the default value when it’s not specified.

Outputs

  • y (heterogeneous) tensor(float): N-D full precision output tensor. It has same shape as input ‘x’.

Type Constraints

  • T tensor(int8), tensor(uint8), tensor(int32): Constrain ‘x_zero_point’ and ‘x’ to 8-bit/32-bit integer tensor.

OnnxDequantizeLinear_13#

class skl2onnx.algebra.onnx_ops.OnnxDequantizeLinear_13(*args, **kwargs)#

Version

Onnx name: DequantizeLinear

This version of the operator has been available since version 13.

Summary

The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor. The dequantization formula is y = (x - x_zero_point) * x_scale. ‘x_scale’ and ‘x_zero_point’ must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. ‘x_zero_point’ and ‘x’ must have same type. ‘x’ and ‘y’ must have same shape. In the case of dequantizing int32, there’s no zero point (zero point is supposed to be 0).

Attributes

  • axis: (Optional) The axis of the dequantizing dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: 1 type: INT

Inputs

Between 2 and 3 inputs.

  • x (heterogeneous) T: N-D quantized input tensor to be de-quantized.

  • x_scale (heterogeneous) tensor(float): Scale for input ‘x’. It can be a scalar, which means a per-tensor/layer dequantization, or a 1-D tensor for per-axis dequantization.

  • x_zero_point (optional, heterogeneous) T: Zero point for input ‘x’. Shape must match x_scale. It’s optional. Zero point is 0 when it’s not specified.

Outputs

  • y (heterogeneous) tensor(float): N-D full precision output tensor. It has same shape as input ‘x’.

Type Constraints

  • T tensor(int8), tensor(uint8), tensor(int32): Constrain ‘x_zero_point’ and ‘x’ to 8-bit/32-bit integer tensor.

OnnxDet#

class skl2onnx.algebra.onnx_ops.OnnxDet(*args, **kwargs)#

Version

Onnx name: Det

This version of the operator has been available since version 11.

Summary

Det calculates determinant of a square matrix or batches of square matrices. Det takes one input tensor of shape [*, M, M], where * is zero or more batch dimensions, and the inner-most 2 dimensions form square matrices. The output is a tensor of shape [*], containing the determinants of all input submatrices. e.g., When the input is 2-D, the output is a scalar (shape is empty: []).

Inputs

  • X (heterogeneous) T: Input tensor

Outputs

  • Y (heterogeneous) T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to floating-point tensors.

OnnxDet_11#

class skl2onnx.algebra.onnx_ops.OnnxDet_11(*args, **kwargs)#

Version

Onnx name: Det

This version of the operator has been available since version 11.

Summary

Det calculates determinant of a square matrix or batches of square matrices. Det takes one input tensor of shape [*, M, M], where * is zero or more batch dimensions, and the inner-most 2 dimensions form square matrices. The output is a tensor of shape [*], containing the determinants of all input submatrices. e.g., When the input is 2-D, the output is a scalar (shape is empty: []).

Inputs

  • X (heterogeneous) T: Input tensor

Outputs

  • Y (heterogeneous) T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to floating-point tensors.
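A quick NumPy analogue of the batched behaviour described above (illustrative only):

import numpy as np

x = np.stack([np.eye(3), 2 * np.eye(3)]).astype(np.float32)  # shape (2, 3, 3)
print(np.linalg.det(x))                                      # [1. 8.] - one determinant per batch
print(np.linalg.det(x[0]).shape)                             # () - a 2-D input yields a scalar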

OnnxDictVectorizer#

class skl2onnx.algebra.onnx_ops.OnnxDictVectorizer(*args, **kwargs)#

Version

Onnx name: DictVectorizer

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Uses an index mapping to convert a dictionary to an array.

Given a dictionary, each key is looked up in the vocabulary attribute corresponding to the key type. The index into the vocabulary array at which the key is found is then used to index the output 1-D tensor ‘Y’ and insert into it the value found in the dictionary ‘X’.

The key type of the input map must correspond to the element type of the defined vocabulary attribute. Therefore, the output array will be equal in length to the index mapping vector parameter. All keys in the input dictionary must be present in the index mapping vector. For each item in the input dictionary, insert its value in the output array. Any keys not present in the input dictionary, will be zero in the output array.

For example: if the string_vocabulary parameter is set to ["a", "c", "b", "z"], then an input of {"a": 4, "c": 8} will produce an output of [4, 8, 0, 0].

Attributes

Inputs

  • X (heterogeneous) T1: A dictionary.

Outputs

  • Y (heterogeneous) T2: A 1-D tensor holding values from the input dictionary.

Type Constraints

  • T1 map(string, int64), map(int64, string), map(int64, float), map(int64, double), map(string, float), map(string, double): The input must be a map from strings or integers to either strings or a numeric type. The key and value types cannot be the same.

  • T2 tensor(int64), tensor(float), tensor(double), tensor(string): The output will be a tensor of the value type of the input map. It’s shape will be [1,C], where C is the length of the input dictionary.

OnnxDictVectorizer_1#

class skl2onnx.algebra.onnx_ops.OnnxDictVectorizer_1(*args, **kwargs)#

Version

Onnx name: DictVectorizer

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Uses an index mapping to convert a dictionary to an array.

Given a dictionary, each key is looked up in the vocabulary attribute corresponding to the key type. The index into the vocabulary array at which the key is found is then used to index the output 1-D tensor ‘Y’ and insert into it the value found in the dictionary ‘X’.

The key type of the input map must correspond to the element type of the defined vocabulary attribute. Therefore, the output array will be equal in length to the index mapping vector parameter. All keys in the input dictionary must be present in the index mapping vector. For each item in the input dictionary, insert its value in the output array. Any keys not present in the input dictionary, will be zero in the output array.

For example: if the string_vocabulary parameter is set to ["a", "c", "b", "z"], then an input of {"a": 4, "c": 8} will produce an output of [4, 8, 0, 0].

Attributes

Inputs

  • X (heterogeneous) T1: A dictionary.

Outputs

  • Y (heterogeneous) T2: A 1-D tensor holding values from the input dictionary.

Type Constraints

  • T1 map(string, int64), map(int64, string), map(int64, float), map(int64, double), map(string, float), map(string, double): The input must be a map from strings or integers to either strings or a numeric type. The key and value types cannot be the same.

  • T2 tensor(int64), tensor(float), tensor(double), tensor(string): The output will be a tensor of the value type of the input map. It’s shape will be [1,C], where C is the length of the input dictionary.
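The worked example in the summary can be reproduced with a few lines of plain Python (illustrative sketch only, not the converter code itself):

import numpy as np

string_vocabulary = ["a", "c", "b", "z"]
X = {"a": 4, "c": 8}

# Keys absent from X stay at zero, as described above.
Y = np.zeros(len(string_vocabulary), dtype=np.int64)
for key, value in X.items():
    Y[string_vocabulary.index(key)] = value

print(Y)   # [4 8 0 0]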

OnnxDiv#

class skl2onnx.algebra.onnx_ops.OnnxDiv(*args, **kwargs)#

Version

Onnx name: Div

This version of the operator has been available since version 14.

Summary

Performs element-wise binary division (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

Inputs

  • A (heterogeneous) T: First operand.

  • B (heterogeneous) T: Second operand.

Outputs

  • C (heterogeneous) T: Result, has same element type as two inputs

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

OnnxDiv_1#

class skl2onnx.algebra.onnx_ops.OnnxDiv_1(*args, **kwargs)#

Version

Onnx name: Div

This version of the operator has been available since version 1.

Summary

Performs element-wise binary division (with limited broadcast support).

If necessary the right-hand-side argument will be broadcasted to match the shape of left-hand-side argument. When broadcasting is specified, the second tensor can either be of element size 1 (including a scalar tensor and any tensor with rank equal to or smaller than the first tensor), or having its shape as a contiguous subset of the first tensor’s shape. The starting of the mutually equal shape is specified by the argument “axis”, and if it is not set, suffix matching is assumed. 1-dim expansion doesn’t work yet.

For example, the following tensor shapes are supported (with broadcast=1):

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor
shape(A) = (2, 3, 4, 5), shape(B) = (5,)
shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

Attribute broadcast=1 needs to be passed to enable broadcasting.

Attributes

  • broadcast: Pass 1 to enable broadcasting Default value is name: "broadcast" i: 0 type: INT

Inputs

  • A (heterogeneous) T: First operand, should share the type with the second operand.

  • B (heterogeneous) T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

Outputs

  • C (heterogeneous) T: Result, has same dimensions and type as A

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
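The legacy "broadcast=1 with axis" behaviour from the shape list above can be emulated in NumPy by padding B with singleton dimensions around the matched block (illustrative only; modern opsets simply use full Numpy-style broadcasting):

import numpy as np

A = np.random.rand(2, 3, 4, 5)
B = np.random.rand(3, 4)             # matches A's shape starting at axis=1
C = A / B.reshape(1, 3, 4, 1)        # pad with 1s outside the matched block

print(C.shape)                        # (2, 3, 4, 5)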

OnnxDiv_13#

class skl2onnx.algebra.onnx_ops.OnnxDiv_13(*args, **kwargs)#

Version

Onnx name: Div

This version of the operator has been available since version 13.

Summary

Performs element-wise binary division (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous) T: First operand.

  • B (heterogeneous) T: Second operand.

Outputs

  • C (heterogeneous) T: Result, has same element type as two inputs

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxDiv_14#

class skl2onnx.algebra.onnx_ops.OnnxDiv_14(*args, **kwargs)#

Version

Onnx name: Div

This version of the operator has been available since version 14.

Summary

Performs element-wise binary division (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

Inputs

  • A (heterogeneous) T: First operand.

  • B (heterogeneous) T: Second operand.

Outputs

  • C (heterogeneous) T: Result, has same element type as two inputs

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

OnnxDiv_6#

class skl2onnx.algebra.onnx_ops.OnnxDiv_6(*args, **kwargs)#

Version

Onnx name: Div

This version of the operator has been available since version 6.

Summary

Performs element-wise binary division (with limited broadcast support).

If necessary the right-hand-side argument will be broadcasted to match the shape of left-hand-side argument. When broadcasting is specified, the second tensor can either be of element size 1 (including a scalar tensor and any tensor with rank equal to or smaller than the first tensor), or having its shape as a contiguous subset of the first tensor’s shape. The starting of the mutually equal shape is specified by the argument “axis”, and if it is not set, suffix matching is assumed. 1-dim expansion doesn’t work yet.

For example, the following tensor shapes are supported (with broadcast=1):

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor
shape(A) = (2, 3, 4, 5), shape(B) = (5,)
shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

Attribute broadcast=1 needs to be passed to enable broadcasting.

Attributes

  • broadcast: Pass 1 to enable broadcasting Default value is name: "broadcast" i: 0 type: INT

Inputs

  • A (heterogeneous) T: First operand, should share the type with the second operand.

  • B (heterogeneous) T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

Outputs

  • C (heterogeneous) T: Result, has same dimensions and type as A

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxDiv_7#

class skl2onnx.algebra.onnx_ops.OnnxDiv_7(*args, **kwargs)#

Version

Onnx name: Div

This version of the operator has been available since version 7.

Summary

Performs element-wise binary division (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous) T: First operand.

  • B (heterogeneous) T: Second operand.

Outputs

  • C (heterogeneous) T: Result, has same element type as two inputs

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxDropout#

class skl2onnx.algebra.onnx_ops.OnnxDropout(*args, **kwargs)#

Version

Onnx name: Dropout

This version of the operator has been available since version 13.

Summary

Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). It produces two tensor outputs, output (floating-point tensor) and mask (optional Tensor<bool>). If training_mode is true then the output Y will be a random dropout; Note that this Dropout scales the masked input data by the following equation, so to convert the trained model into inference mode, the user can simply not pass training_mode input or set it to false.

output = scale * data * mask,

where

scale = 1. / (1. - ratio).

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

Attributes

Inputs

Between 1 and 3 inputs.

  • data (heterogeneous) T: The input data as Tensor.

  • ratio (optional, heterogeneous) T1: The ratio of random dropout, with value in [0, 1). If this input was not set, or if it was set to 0, the output would be a simple copy of the input. If it’s non-zero, output will be a random dropout of the scaled input, which is typically the case during training. It is an optional value, if not specified it will default to 0.5.

  • training_mode (optional, heterogeneous) T2: If set to true then it indicates dropout is being used for training. It is an optional value hence unless specified explicitly, it is false. If it is false, ratio is ignored and the operation mimics inference mode where nothing will be dropped from the input data and if mask is requested as output it will contain all ones.

Outputs

Between 1 and 2 outputs.

  • output (heterogeneous) T: The output.

  • mask (optional, heterogeneous) T2: The output mask.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • T1 tensor(float16), tensor(float), tensor(double): Constrain input ‘ratio’ types to float tensors.

  • T2 tensor(bool): Constrain output ‘mask’ types to boolean tensors.
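The scaling above (scale = 1 / (1 - ratio)) keeps the expected value of the output equal to the input, which is why inference can simply skip the operator. A minimal NumPy sketch of the training-mode computation (illustrative only):

import numpy as np

ratio = 0.5
data = np.random.rand(4, 3).astype(np.float32)

mask = np.random.uniform(size=data.shape) >= ratio   # True where values are kept
scale = 1.0 / (1.0 - ratio)
output = scale * data * mask

print(output.shape, mask.dtype)   # (4, 3) bool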

OnnxDropout_1#

class skl2onnx.algebra.onnx_ops.OnnxDropout_1(*args, **kwargs)#

Version

Onnx name: Dropout

This version of the operator has been available since version 1.

Summary

Dropout takes one input data (Tensor<float>) and produces two Tensor outputs, output (Tensor<float>) and mask (Tensor<bool>). Depending on whether it is in test mode or not, the output Y will either be a random dropout, or a simple copy of the input. Note that our implementation of Dropout does scaling in the training phase, so during testing nothing needs to be done.

Attributes

  • is_test: (int, default 0) if nonzero, run dropout in test mode where the output is simply Y = X. Default value is name: "is_test" i: 0 type: INT

  • ratio: (float, default 0.5) the ratio of random dropout Default value is name: "ratio" f: 0.5 type: FLOAT

Inputs

  • data (heterogeneous) T: The input data as Tensor.

Outputs

Between 1 and 2 outputs.

  • output (heterogeneous) T: The output.

  • mask (optional, heterogeneous) T: The output mask. If is_test is nonzero, this output is not filled.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxDropout_10#

class skl2onnx.algebra.onnx_ops.OnnxDropout_10(*args, **kwargs)#

Version

Onnx name: Dropout

This version of the operator has been available since version 10.

Summary

Dropout takes one input floating tensor and produces two tensor outputs, output (floating tensor) and mask (Tensor<bool>). Depending on whether it is in test mode or not, the output Y will either be a random dropout, or a simple copy of the input. Note that our implementation of Dropout does scaling in the training phase, so during testing nothing needs to be done. This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

Attributes

  • ratio: The ratio of random dropout Default value is name: "ratio" f: 0.5 type: FLOAT

Inputs

  • data (heterogeneous) T: The input data as Tensor.

Outputs

Between 1 and 2 outputs.

  • output (heterogeneous) T: The output.

  • mask (optional, heterogeneous) T1: The output mask.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • T1 tensor(bool): Constrain output mask types to boolean tensors.

OnnxDropout_12#

class skl2onnx.algebra.onnx_ops.OnnxDropout_12(*args, **kwargs)#

Version

Onnx name: Dropout

This version of the operator has been available since version 12.

Summary

Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). It produces two tensor outputs, output (floating-point tensor) and mask (optional Tensor<bool>). If training_mode is true then the output Y will be a random dropout; Note that this Dropout scales the masked input data by the following equation, so to convert the trained model into inference mode, the user can simply not pass training_mode input or set it to false.

output = scale * data * mask,

where

scale = 1. / (1. - ratio).

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

Attributes

Inputs

Between 1 and 3 inputs.

  • data (heterogeneous) T: The input data as Tensor.

  • ratio (optional, heterogeneous) T1: The ratio of random dropout, with value in [0, 1). If this input was not set, or if it was set to 0, the output would be a simple copy of the input. If it’s non-zero, output will be a random dropout of the scaled input, which is typically the case during training. It is an optional value, if not specified it will default to 0.5.

  • training_mode (optional, heterogeneous) T2: If set to true then it indicates dropout is being used for training. It is an optional value hence unless specified explicitly, it is false. If it is false, ratio is ignored and the operation mimics inference mode where nothing will be dropped from the input data and if mask is requested as output it will contain all ones.

Outputs

Between 1 and 2 outputs.

  • output (heterogeneous) T: The output.

  • mask (optional, heterogeneous) T2: The output mask.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • T1 tensor(float16), tensor(float), tensor(double): Constrain input ‘ratio’ types to float tensors.

  • T2 tensor(bool): Constrain output ‘mask’ types to boolean tensors.

OnnxDropout_13#

class skl2onnx.algebra.onnx_ops.OnnxDropout_13(*args, **kwargs)#

Version

Onnx name: Dropout

This version of the operator has been available since version 13.

Summary

Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). It produces two tensor outputs, output (floating-point tensor) and mask (optional Tensor<bool>). If training_mode is true then the output Y will be a random dropout; Note that this Dropout scales the masked input data by the following equation, so to convert the trained model into inference mode, the user can simply not pass training_mode input or set it to false.

output = scale * data * mask,

where

scale = 1. / (1. - ratio).

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

Attributes

Inputs

Between 1 and 3 inputs.

  • data (heterogeneous) T: The input data as Tensor.

  • ratio (optional, heterogeneous) T1: The ratio of random dropout, with value in [0, 1). If this input was not set, or if it was set to 0, the output would be a simple copy of the input. If it’s non-zero, output will be a random dropout of the scaled input, which is typically the case during training. It is an optional value, if not specified it will default to 0.5.

  • training_mode (optional, heterogeneous) T2: If set to true then it indicates dropout is being used for training. It is an optional value hence unless specified explicitly, it is false. If it is false, ratio is ignored and the operation mimics inference mode where nothing will be dropped from the input data and if mask is requested as output it will contain all ones.

Outputs

Between 1 and 2 outputs.

  • output (heterogeneous) T: The output.

  • mask (optional, heterogeneous) T2: The output mask.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • T1 tensor(float16), tensor(float), tensor(double): Constrain input ‘ratio’ types to float tensors.

  • T2 tensor(bool): Constrain output ‘mask’ types to boolean tensors.

OnnxDropout_6#

class skl2onnx.algebra.onnx_ops.OnnxDropout_6(*args, **kwargs)#

Version

Onnx name: Dropout

This version of the operator has been available since version 6.

Summary

Dropout takes one input data (Tensor<float>) and produces two Tensor outputs, output (Tensor<float>) and mask (Tensor<bool>). Depending on whether it is in test mode or not, the output Y will either be a random dropout, or a simple copy of the input. Note that our implementation of Dropout does scaling in the training phase, so during testing nothing needs to be done.

Attributes

  • is_test: (int, default 0) if nonzero, run dropout in test mode where the output is simply Y = X. Default value is name: "is_test" i: 0 type: INT

  • ratio: (float, default 0.5) the ratio of random dropout Default value is name: "ratio" f: 0.5 type: FLOAT

Inputs

  • data (heterogeneous) T: The input data as Tensor.

Outputs

Between 1 and 2 outputs.

  • output (heterogeneous) T: The output.

  • mask (optional, heterogeneous) T: The output mask. If is_test is nonzero, this output is not filled.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxDropout_7#

class skl2onnx.algebra.onnx_ops.OnnxDropout_7(*args, **kwargs)#

Version

Onnx name: Dropout

This version of the operator has been available since version 7.

Summary

Dropout takes one input data (Tensor<float>) and produces two Tensor outputs, output (Tensor<float>) and mask (Tensor<bool>). Depending on whether it is in test mode or not, the output Y will either be a random dropout, or a simple copy of the input. Note that our implementation of Dropout does scaling in the training phase, so during testing nothing needs to be done. This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

Attributes

  • ratio: The ratio of random dropout Default value is name: "ratio" f: 0.5 type: FLOAT

Inputs

  • data (heterogeneous) T: The input data as Tensor.

Outputs

Between 1 and 2 outputs.

  • output (heterogeneous) T: The output.

  • mask (optional, heterogeneous) T: The output mask.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxDynamicQuantizeLinear#

class skl2onnx.algebra.onnx_ops.OnnxDynamicQuantizeLinear(*args, **kwargs)#

Version

Onnx name: DynamicQuantizeLinear

This version of the operator has been available since version 11.

Summary

A Function to fuse calculation for Scale, Zero Point and FP32->8Bit conversion of FP32 Input data. Outputs Scale, ZeroPoint and Quantized Input for a given FP32 Input. Scale is calculated as:

y_scale = (max(x) - min(x))/(qmax - qmin)
* where qmax and qmin are max and min values for quantization range, i.e. [0, 255] in case of uint8
* data range is adjusted to include 0.

Zero point is calculated as:

intermediate_zero_point = qmin - min(x)/y_scale
y_zero_point = cast(round(saturate(intermediate_zero_point)))
* where qmax and qmin are max and min values for quantization range, i.e. [0, 255] in case of uint8
* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.
* rounding to nearest ties to even.

Data quantization formula is:

y = saturate (round (x / y_scale) + y_zero_point)
* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.
* rounding to nearest ties to even.

Inputs

  • x (heterogeneous) T1: Input tensor

Outputs

  • y (heterogeneous) T2: Quantized output tensor

  • y_scale (heterogeneous) tensor(float): Output scale. It’s a scalar, which means a per-tensor/layer quantization.

  • y_zero_point (heterogeneous) T2: Output zero point. It’s a scalar, which means a per-tensor/layer quantization.

Type Constraints

  • T1 tensor(float): Constrain ‘x’ to float tensor.

  • T2 tensor(uint8): Constrain ‘y_zero_point’ and ‘y’ to 8-bit unsigned integer tensor.
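The scale and zero-point formulas above can be checked with a short NumPy sketch (illustrative only; uint8 case, which is the only type currently supported):

import numpy as np

x = np.array([-1.0, 0.0, 1.5, 3.0], dtype=np.float32)
qmin, qmax = 0, 255

# Adjust the data range so that it includes 0.
rmin, rmax = min(x.min(), 0.0), max(x.max(), 0.0)
y_scale = (rmax - rmin) / (qmax - qmin)
# round() uses round-half-to-even, matching "rounding to nearest ties to even".
y_zero_point = np.uint8(np.clip(round(qmin - rmin / y_scale), qmin, qmax))

y = np.clip(np.rint(x / y_scale) + y_zero_point, qmin, qmax).astype(np.uint8)
print(y_scale, y_zero_point, y)   # ~0.01569  64  [  0  64 160 255]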

OnnxDynamicQuantizeLinear_11#

class skl2onnx.algebra.onnx_ops.OnnxDynamicQuantizeLinear_11(*args, **kwargs)#

Version

Onnx name: DynamicQuantizeLinear

This version of the operator has been available since version 11.

Summary

A Function to fuse calculation for Scale, Zero Point and FP32->8Bit conversion of FP32 Input data. Outputs Scale, ZeroPoint and Quantized Input for a given FP32 Input. Scale is calculated as:

y_scale = (max(x) - min(x))/(qmax - qmin)
* where qmax and qmin are max and min values for quantization range, i.e. [0, 255] in case of uint8
* data range is adjusted to include 0.

Zero point is calculated as:

intermediate_zero_point = qmin - min(x)/y_scale
y_zero_point = cast(round(saturate(intermediate_zero_point)))
* where qmax and qmin are max and min values for quantization range, i.e. [0, 255] in case of uint8
* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.
* rounding to nearest ties to even.

Data quantization formula is:

y = saturate (round (x / y_scale) + y_zero_point)
* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.
* rounding to nearest ties to even.

Inputs

  • x (heterogeneous) T1: Input tensor

Outputs

  • y (heterogeneous) T2: Quantized output tensor

  • y_scale (heterogeneous) tensor(float): Output scale. It’s a scalar, which means a per-tensor/layer quantization.

  • y_zero_point (heterogeneous) T2: Output zero point. It’s a scalar, which means a per-tensor/layer quantization.

Type Constraints

  • T1 tensor(float): Constrain ‘x’ to float tensor.

  • T2 tensor(uint8): Constrain ‘y_zero_point’ and ‘y’ to 8-bit unsigned integer tensor.

OnnxEinsum#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxEinsum(*args, **kwargs)#
-

Version

-

Onnx name: Einsum

-

This version of the operator has been available since version 12.

-

Summary

-

An einsum of the form `term1, term2 -> output-term` produces an output tensor using the following equation:

output[output-term] = reduce-sum( input1[term1] * input2[term2] )

where the reduce-sum performs a summation over all the indices occurring in the input terms (term1, term2) that do not occur in the output-term.

The Einsum operator evaluates algebraic tensor operations on a sequence of tensors, using the Einstein summation convention. The equation string contains a comma-separated sequence of lower case letters. Each term corresponds to an operand tensor, and the characters within the terms correspond to operand dimensions.

This sequence may be followed by "->" to separate the left and right hand sides of the equation. If the equation contains "->" followed by the right-hand side, the explicit (not classical) form of the Einstein summation is performed, and the right-hand side indices indicate output tensor dimensions. In other cases, output indices are (implicitly) set to the alphabetically sorted sequence of indices appearing exactly once in the equation.

When a dimension character is repeated in the left-hand side, it represents summation along the dimension.

The equation may contain an ellipsis ("...") to enable broadcasting. An ellipsis must indicate a fixed number of dimensions; specifically, every occurrence of an ellipsis in the equation must represent the same number of dimensions. The right-hand side may contain exactly one ellipsis. In implicit mode, the ellipsis dimensions are set to the beginning of the output. The equation string may contain space (U+0020) characters.

Attributes

-
    -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • Inputs (variadic, heterogeneous)T: Operands

  • -
-

Outputs

-
    -
  • Output (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numerical tensor types.

  • -
-
- -
-
-
-
-
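
Example

A minimal, illustrative sketch (assumptions: onnxruntime is installed and the target opset is at least 12): a plain matrix multiplication expressed with the equation "ij,jk->ik". The input names 'a' and 'b' are arbitrary.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxEinsum
from onnxruntime import InferenceSession

a = np.random.rand(2, 3).astype(np.float32)
b = np.random.rand(3, 4).astype(np.float32)

# 'ij,jk->ik' contracts over j, i.e. a standard matrix product.
node = OnnxEinsum('a', 'b', equation='ij,jk->ik',
                  op_version=12, output_names=['out'])
onx = node.to_onnx({'a': a, 'b': b})

sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
result = sess.run(None, {'a': a, 'b': b})[0]
assert np.allclose(result, a @ b, atol=1e-5)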

OnnxEinsum_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxEinsum_12(*args, **kwargs)#
-

Version

-

Onnx name: Einsum

-

This version of the operator has been available since version 12.

-

Summary

-

An einsum of the form `term1, term2 -> output-term` produces an output tensor using the following equation:

output[output-term] = reduce-sum( input1[term1] * input2[term2] )

where the reduce-sum performs a summation over all the indices occurring in the input terms (term1, term2) that do not occur in the output-term.

The Einsum operator evaluates algebraic tensor operations on a sequence of tensors, using the Einstein summation convention. The equation string contains a comma-separated sequence of lower case letters. Each term corresponds to an operand tensor, and the characters within the terms correspond to operand dimensions.

This sequence may be followed by "->" to separate the left and right hand sides of the equation. If the equation contains "->" followed by the right-hand side, the explicit (not classical) form of the Einstein summation is performed, and the right-hand side indices indicate output tensor dimensions. In other cases, output indices are (implicitly) set to the alphabetically sorted sequence of indices appearing exactly once in the equation.

When a dimension character is repeated in the left-hand side, it represents summation along the dimension.

The equation may contain an ellipsis ("...") to enable broadcasting. An ellipsis must indicate a fixed number of dimensions; specifically, every occurrence of an ellipsis in the equation must represent the same number of dimensions. The right-hand side may contain exactly one ellipsis. In implicit mode, the ellipsis dimensions are set to the beginning of the output. The equation string may contain space (U+0020) characters.

Attributes

-
    -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • Inputs (variadic, heterogeneous)T: Operands

  • -
-

Outputs

-
    -
  • Output (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numerical tensor types.

  • -
-
- -
-
-
-
-

OnnxElu#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxElu(*args, **kwargs)#
-

Version

-

Onnx name: Elu

-

This version of the operator has been available since version 6.

-

Summary

-

Elu takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the function f(x) = alpha * (exp(x) - 1.) for x < 0, f(x) = x for x >= 0, is applied to the tensor elementwise.

-

Attributes

-
    -
  • alpha: Coefficient of ELU. Default value is -name: "alpha" f: 1.0 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: 1D input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: 1D output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-
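
Example

A minimal, illustrative sketch (onnxruntime assumed installed; the input name 'X' is arbitrary): it applies Elu with the default alpha=1.0 and checks the result against the formula given in the summary above.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxElu
from onnxruntime import InferenceSession

x = np.array([[-2.0, -0.5, 0.0, 1.5]], dtype=np.float32)

node = OnnxElu('X', alpha=1.0, op_version=6, output_names=['Y'])
onx = node.to_onnx({'X': x})

sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
got = sess.run(None, {'X': x})[0]
expected = np.where(x < 0, np.exp(x) - 1.0, x)  # alpha = 1.0
assert np.allclose(got, expected, atol=1e-6)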

OnnxElu_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxElu_1(*args, **kwargs)#
-

Version

-

Onnx name: Elu

-

This version of the operator has been available since version 1.

-

Summary

-

Elu takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the function f(x) = alpha * (exp(x) - 1.) for x < 0, f(x) = x for x >= 0, is applied to the tensor elementwise.

-

Attributes

-
    -
  • alpha: Coefficient of ELU default to 1.0. Default value is -name: "alpha" f: 1.0 type: FLOAT

  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: 1D input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: 1D output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxElu_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxElu_6(*args, **kwargs)#
-

Version

-

Onnx name: Elu

-

This version of the operator has been available since version 6.

-

Summary

-

Elu takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the function f(x) = alpha * (exp(x) - 1.) for x < 0, f(x) = x for x >= 0, is applied to the tensor elementwise.

-

Attributes

-
    -
  • alpha: Coefficient of ELU. Default value is -name: "alpha" f: 1.0 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: 1D input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: 1D output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxEqual#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxEqual(*args, **kwargs)#
-

Version

-

Onnx name: Equal

-

This version of the operator has been available since version 13.

-

Summary

-

Returns the tensor resulting from performing the equal logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(bool), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrains input types to all numeric tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-
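
Example

A minimal, illustrative sketch (onnxruntime assumed installed; the input names are arbitrary): element-wise equality between a matrix and a row vector, relying on the Numpy-style broadcasting described in the summary.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxEqual
from onnxruntime import InferenceSession

a = np.array([[1, 2, 3], [3, 2, 1]], dtype=np.int64)
b = np.array([[1, 2, 1]], dtype=np.int64)  # broadcast along the first axis

node = OnnxEqual('A', 'B', op_version=13, output_names=['C'])
onx = node.to_onnx({'A': a, 'B': b})

sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
c = sess.run(None, {'A': a, 'B': b})[0]
assert np.array_equal(c, a == b)  # boolean result tensor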

OnnxEqual_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxEqual_1(*args, **kwargs)#
-

Version

-

Onnx name: Equal

-

This version of the operator has been available since version 1.

-

Summary

-

Returns the tensor resulting from performing the equal logical operation elementwise on the input tensors A and B.

-

If broadcasting is enabled, the right-hand-side argument will be broadcast to match the shape of the left-hand-side argument. See the doc of Add for a detailed description of the broadcasting rules.

-

Attributes

-
    -
  • -
  • broadcast: Enable broadcasting Default value is -name: "broadcast" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: Left input tensor for the logical operator.

  • -
  • B (heterogeneous)T: Right input tensor for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(bool), tensor(int32), tensor(int64): Constrains input to integral tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxEqual_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxEqual_11(*args, **kwargs)#
-

Version

-

Onnx name: Equal

-

This version of the operator has been available since version 11.

-

Summary

-

Returns the tensor resulting from performing the equal logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(bool), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input types to all numeric tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxEqual_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxEqual_13(*args, **kwargs)#
-

Version

-

Onnx name: Equal

-

This version of the operator has been available since version 13.

-

Summary

-

Returns the tensor resulting from performing the equal logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(bool), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrains input types to all numeric tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxEqual_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxEqual_7(*args, **kwargs)#
-

Version

-

Onnx name: Equal

-

This version of the operator has been available since version 7.

-

Summary

-

Returns the tensor resulting from performing the equal logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(bool), tensor(int32), tensor(int64): Constrains input to integral tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxErf#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxErf(*args, **kwargs)#
-

Version

-

Onnx name: Erf

-

This version of the operator has been available since version 13.

-

Summary

-

Computes the error function of the given input tensor element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The error function of the input tensor computed element-wise. It has the same shape and type of the input.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-
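
Example

A minimal, illustrative sketch (onnxruntime assumed installed; the input name 'X' is arbitrary): the element-wise error function compared against Python's math.erf.

import numpy as np
from math import erf
from skl2onnx.algebra.onnx_ops import OnnxErf
from onnxruntime import InferenceSession

x = np.linspace(-2.0, 2.0, 5).astype(np.float32)

node = OnnxErf('X', op_version=13, output_names=['Y'])
onx = node.to_onnx({'X': x})

sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
got = sess.run(None, {'X': x})[0]
expected = np.array([erf(float(v)) for v in x], dtype=np.float32)
assert np.allclose(got, expected, atol=1e-5)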

OnnxErf_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxErf_13(*args, **kwargs)#
-

Version

-

Onnx name: Erf

-

This version of the operator has been available since version 13.

-

Summary

-

Computes the error function of the given input tensor element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The error function of the input tensor computed element-wise. It has the same shape and type of the input.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxErf_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxErf_9(*args, **kwargs)#
-

Version

-

Onnx name: Erf

-

This version of the operator has been available since version 9.

-

Summary

-

Computes the error function of the given input tensor element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The error function of the input tensor computed element-wise. It has the same shape and type of the input.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxExp#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxExp(*args, **kwargs)#
-

Version

-

Onnx name: Exp

-

This version of the operator has been available since version 13.

-

Summary

-

Calculates the exponential of the given input tensor, element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The exponential of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-
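
Example

A minimal, illustrative sketch (onnxruntime assumed installed; the input name 'X' is arbitrary): the element-wise exponential compared against numpy.exp.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxExp
from onnxruntime import InferenceSession

x = np.array([[0.0, 1.0, -1.0]], dtype=np.float32)

node = OnnxExp('X', op_version=13, output_names=['Y'])
onx = node.to_onnx({'X': x})

sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
assert np.allclose(sess.run(None, {'X': x})[0], np.exp(x), atol=1e-6)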

OnnxExp_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxExp_1(*args, **kwargs)#
-

Version

-

Onnx name: Exp

-

This version of the operator has been available since version 1.

-

Summary

-

Calculates the exponential of the given input tensor, element-wise.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The exponential of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxExp_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxExp_13(*args, **kwargs)#
-

Version

-

Onnx name: Exp

-

This version of the operator has been available since version 13.

-

Summary

-

Calculates the exponential of the given input tensor, element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The exponential of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxExp_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxExp_6(*args, **kwargs)#
-

Version

-

Onnx name: Exp

-

This version of the operator has been available since version 6.

-

Summary

-

Calculates the exponential of the given input tensor, element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The exponential of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxExpand#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxExpand(*args, **kwargs)#
-

Version

-

Onnx name: Expand

-

This version of the operator has been available since version 13.

-

Summary

-

Broadcast the input tensor following the given shape and the broadcast rule. The broadcast rule is similar to numpy.array(input) * numpy.ones(shape): dimensions are right-aligned, and two corresponding dimensions must either have the same value or one of them must be equal to 1. This operator is also similar to numpy.broadcast_to(input, shape), but the major difference is that numpy.broadcast_to() does not allow shape to be smaller than input.size(). It is possible that output.shape is not equal to shape when some dimensions in shape are equal to 1, or when shape.ndim < input.shape.ndim.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
  • shape (heterogeneous)tensor(int64): A 1-D tensor indicates the shape you want to expand to, following the broadcast rule

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensors.

  • -
-
- -
-
-
-
-
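
Example

A minimal, illustrative sketch (onnxruntime assumed installed; the input name 'X' is arbitrary): a (2, 1) column vector is broadcast to a (2, 3) matrix, mirroring numpy.broadcast_to as described in the summary. The target shape is passed as a constant int64 tensor, which skl2onnx stores as an initializer.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxExpand
from onnxruntime import InferenceSession

x = np.array([[1.0], [2.0]], dtype=np.float32)   # shape (2, 1)
shape = np.array([2, 3], dtype=np.int64)         # target shape

node = OnnxExpand('X', shape, op_version=13, output_names=['Y'])
onx = node.to_onnx({'X': x})

sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
got = sess.run(None, {'X': x})[0]
assert np.array_equal(got, np.broadcast_to(x, (2, 3)))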

OnnxExpand_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxExpand_13(*args, **kwargs)#
-

Version

-

Onnx name: Expand

-

This version of the operator has been available since version 13.

-

Summary

-

Broadcast the input tensor following the given shape and the broadcast rule. The broadcast rule is similar to numpy.array(input) * numpy.ones(shape): dimensions are right-aligned, and two corresponding dimensions must either have the same value or one of them must be equal to 1. This operator is also similar to numpy.broadcast_to(input, shape), but the major difference is that numpy.broadcast_to() does not allow shape to be smaller than input.size(). It is possible that output.shape is not equal to shape when some dimensions in shape are equal to 1, or when shape.ndim < input.shape.ndim.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
  • shape (heterogeneous)tensor(int64): A 1-D tensor indicates the shape you want to expand to, following the broadcast rule

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensors.

  • -
-
- -
-
-
-
-

OnnxExpand_8#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxExpand_8(*args, **kwargs)#
-

Version

-

Onnx name: Expand

-

This version of the operator has been available since version 8.

-

Summary

-

Broadcast the input tensor following the given shape and the broadcast rule. The broadcast rule is similar to numpy.array(input) * numpy.ones(shape): dimensions are right-aligned, and two corresponding dimensions must either have the same value or one of them must be equal to 1. This operator is also similar to numpy.broadcast_to(input, shape), but the major difference is that numpy.broadcast_to() does not allow shape to be smaller than input.size(). It is possible that output.shape is not equal to shape when some dimensions in shape are equal to 1, or when shape.ndim < input.shape.ndim.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
  • shape (heterogeneous)tensor(int64): A 1-D tensor indicates the shape you want to expand to, following the broadcast rule

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensors.

  • -
-
- -
-
-
-
-

OnnxEyeLike#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxEyeLike(*args, **kwargs)#
-

Version

-

Onnx name: EyeLike

-

This version of the operator has been available since version 9.

-

Summary

-

Generate a 2D tensor (matrix) with ones on the diagonal and zeros everywhere else. Only 2D tensors are supported, i.e. input T1 must be of rank 2. The shape of the output tensor is the same as the input tensor. The data type can be specified by the ‘dtype’ argument. If ‘dtype’ is not specified, then the type of the input tensor is used. By default, the main diagonal is populated with ones, but attribute ‘k’ can be used to populate upper or lower diagonals. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message and be valid as an output type.

-

Attributes

-
    -
  • -
  • k: (Optional) Index of the diagonal to be populated with ones. Default is 0. If T2 is the output, this op sets T2[i, i+k] = 1. k = 0 populates the main diagonal, k > 0 populates an upper diagonal, and k < 0 populates a lower diagonal. Default value is -name: "k" i: 0 type: INT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: 2D input tensor to copy shape, and optionally, type information from.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor, same shape as input tensor T1.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain input types. Strings and complex are not supported.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain output types. Strings and complex are not supported.

  • -
-
- -
-
-
-
-
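
Example

A minimal, illustrative sketch (onnxruntime assumed installed; the input name 'X' is arbitrary): it builds a tensor with the same shape as a (3, 4) input, with ones on the first upper diagonal (k=1) and zeros elsewhere.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxEyeLike
from onnxruntime import InferenceSession

x = np.zeros((3, 4), dtype=np.float32)

node = OnnxEyeLike('X', k=1, op_version=9, output_names=['Y'])
onx = node.to_onnx({'X': x})

sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
print(sess.run(None, {'X': x})[0])  # ones on the diagonal shifted up by one column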

OnnxEyeLike_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxEyeLike_9(*args, **kwargs)#
-

Version

-

Onnx name: EyeLike

-

This version of the operator has been available since version 9.

-

Summary

-

Generate a 2D tensor (matrix) with ones on the diagonal and zeros everywhere else. Only 2D tensors are supported, i.e. input T1 must be of rank 2. The shape of the output tensor is the same as the input tensor. The data type can be specified by the ‘dtype’ argument. If ‘dtype’ is not specified, then the type of the input tensor is used. By default, the main diagonal is populated with ones, but attribute ‘k’ can be used to populate upper or lower diagonals. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message and be valid as an output type.

-

Attributes

-
    -
  • -
  • k: (Optional) Index of the diagonal to be populated with ones. Default is 0. If T2 is the output, this op sets T2[i, i+k] = 1. k = 0 populates the main diagonal, k > 0 populates an upper diagonal, and k < 0 populates a lower diagonal. Default value is -name: "k" i: 0 type: INT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: 2D input tensor to copy shape, and optionally, type information from.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor, same shape as input tensor T1.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain input types. Strings and complex are not supported.

  • -
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain output types. Strings and complex are not supported.

  • -
-
- -
-
-
-
-

OnnxFeatureVectorizer#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxFeatureVectorizer(*args, **kwargs)#
-

Version

-

Onnx name: FeatureVectorizer

-

This version of the operator has been available since version 1 of domain ai.onnx.ml.

-

Summary

-

Concatenates input tensors into one continuous output.

-

All input shapes are 2-D and are concatenated along the second dimension. 1-D tensors are treated as [1,C]. Inputs are copied to the output maintaining the order of the input arguments.

-

All inputs must be integers or floats, while the output will be all floating point values.

-

Attributes

-
    -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • X (variadic, heterogeneous)T1: An ordered collection of tensors, all with the same element type.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(float): The output array, elements ordered as the inputs.

  • -
-

Type Constraints

-
    -
  • T1 tensor(int32), tensor(int64), tensor(float), tensor(double): The input type must be a tensor of a numeric type.

  • -
-
- -
-
-
-
-
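
Example

A minimal, illustrative sketch (onnxruntime assumed installed; the input names are arbitrary): two 2-D float inputs are concatenated along the second dimension. The inputdimensions attribute, taken from the ONNX-ML specification, gives the number of columns of each input.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxFeatureVectorizer
from onnxruntime import InferenceSession

a = np.array([[1.0, 2.0]], dtype=np.float32)
b = np.array([[3.0, 4.0, 5.0]], dtype=np.float32)

node = OnnxFeatureVectorizer('A', 'B', inputdimensions=[2, 3],
                             output_names=['Y'])
onx = node.to_onnx({'A': a, 'B': b})

sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
print(sess.run(None, {'A': a, 'B': b})[0])  # [[1. 2. 3. 4. 5.]]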

OnnxFeatureVectorizer_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxFeatureVectorizer_1(*args, **kwargs)#
-

Version

-

Onnx name: FeatureVectorizer

-

This version of the operator has been available since version 1 of domain ai.onnx.ml.

-

Summary

-

Concatenates input tensors into one continuous output.

-

All input shapes are 2-D and are concatenated along the second dimension. 1-D tensors are treated as [1,C]. Inputs are copied to the output maintaining the order of the input arguments.

-

All inputs must be integers or floats, while the output will be all floating point values.

-

Attributes

-
    -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • X (variadic, heterogeneous)T1: An ordered collection of tensors, all with the same element type.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(float): The output array, elements ordered as the inputs.

  • -
-

Type Constraints

-
    -
  • T1 tensor(int32), tensor(int64), tensor(float), tensor(double): The input type must be a tensor of a numeric type.

  • -
-
- -
-
-
-
-

OnnxFlatten#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxFlatten(*args, **kwargs)#
-

Version

-

Onnx name: Flatten

-

This version of the operator has been available since version 13.

-

Summary

-

Flattens the input tensor into a 2D matrix. If the input tensor has shape (d_0, d_1, … d_n) then the output will have shape (d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X d_n).

-

Attributes

-
    -
  • axis: Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor. Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 … d_n), where the shape of the input tensor is (d_0, d_1, … d_n). Default value is -name: "axis" i: 1 type: INT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: A tensor of rank >= axis.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output to all tensor types.

  • -
-
- -
-
-
-
-
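
Example

A minimal, illustrative sketch (onnxruntime assumed installed; the input name 'X' is arbitrary): a (2, 3, 4) tensor is flattened with axis=1, so the output shape is (2, 3 * 4) = (2, 12).

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxFlatten
from onnxruntime import InferenceSession

x = np.arange(24, dtype=np.float32).reshape((2, 3, 4))

node = OnnxFlatten('X', axis=1, op_version=13, output_names=['Y'])
onx = node.to_onnx({'X': x})

sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
got = sess.run(None, {'X': x})[0]
assert got.shape == (2, 12)
assert np.array_equal(got, x.reshape((2, 12)))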

OnnxFlatten_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxFlatten_1(*args, **kwargs)#
-

Version

-

Onnx name: Flatten

-

This version of the operator has been available since version 1.

-

Summary

-

Flattens the input tensor into a 2D matrix. If the input tensor has shape (d_0, d_1, … d_n) then the output will have shape (d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X d_n).

-

Attributes

-
    -
  • axis: Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [0, R], where R is the rank of the input tensor. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 … d_n), where the shape of the input tensor is (d_0, d_1, … d_n). Default value is -name: "axis" i: 1 type: INT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: A tensor of rank >= axis.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxFlatten_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxFlatten_11(*args, **kwargs)#
-

Version

-

Onnx name: Flatten

-

This version of the operator has been available since version 11.

-

Summary

-

Flattens the input tensor into a 2D matrix. If the input tensor has shape (d_0, d_1, … d_n) then the output will have shape (d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X d_n).

-

Attributes

-
    -
  • axis: Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor. Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 … d_n), where the shape of the input tensor is (d_0, d_1, … d_n). Default value is -name: "axis" i: 1 type: INT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: A tensor of rank >= axis.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output to all tensor types.

  • -
-
- -
-
-
-
-

OnnxFlatten_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxFlatten_13(*args, **kwargs)#
-

Version

-

Onnx name: Flatten

-

This version of the operator has been available since version 13.

-

Summary

-

Flattens the input tensor into a 2D matrix. If the input tensor has shape (d_0, d_1, … d_n) then the output will have shape (d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X d_n).

-

Attributes

-
    -
  • axis: Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor. Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 … d_n), where the shape of the input tensor is (d_0, d_1, … d_n). Default value is -name: "axis" i: 1 type: INT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: A tensor of rank >= axis.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output to all tensor types.

  • -
-
- -
-
-
-
-

OnnxFlatten_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxFlatten_9(*args, **kwargs)#
-

Version

-

Onnx name: Flatten

-

This version of the operator has been available since version 9.

-

Summary

-

Flattens the input tensor into a 2D matrix. If the input tensor has shape (d_0, d_1, … d_n) then the output will have shape (d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X d_n).

-

Attributes

-
    -
  • axis: Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [0, R], where R is the rank of the input tensor. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 … d_n), where the shape of the input tensor is (d_0, d_1, … d_n). Default value is -name: "axis" i: 1 type: INT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: A tensor of rank >= axis.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output to all tensor types.

  • -
-
- -
-
-
-
-

OnnxFloor#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxFloor(*args, **kwargs)#
-

Version

-

Onnx name: Floor

-

This version of the operator has been available since version 13.

-

Summary

-

Floor takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the floor function, y = floor(x), is applied to the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-
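
Example

A minimal, illustrative sketch (onnxruntime assumed installed; the input name 'X' is arbitrary): the element-wise floor compared against numpy.floor.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxFloor
from onnxruntime import InferenceSession

x = np.array([[-1.5, -0.2, 0.7, 2.0]], dtype=np.float32)

node = OnnxFloor('X', op_version=13, output_names=['Y'])
onx = node.to_onnx({'X': x})

sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
assert np.array_equal(sess.run(None, {'X': x})[0], np.floor(x))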

OnnxFloor_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxFloor_1(*args, **kwargs)#
-

Version

-

Onnx name: Floor

-

This version of the operator has been available since version 1.

-

Summary

-

Floor takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the floor function, y = floor(x), is applied to the tensor elementwise.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxFloor_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxFloor_13(*args, **kwargs)#
-

Version

-

Onnx name: Floor

-

This version of the operator has been available since version 13.

-

Summary

-

Floor takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the floor function, y = floor(x), is applied to the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxFloor_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxFloor_6(*args, **kwargs)#
-

Version

-

Onnx name: Floor

-

This version of the operator has been available since version 6.

-

Summary

-

Floor takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the floor function, y = floor(x), is applied to the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxGRU#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGRU(*args, **kwargs)#
-

Version

-

Onnx name: GRU

-

This version of the operator has been available since version 14.

-

Summary

-

Computes a one-layer GRU. This operator is usually supported via some custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

z - update gate

-

r - reset gate

-

h - hidden gate

-

t - time step (t-1 means previous time step)

-

W[zrh] - W parameter weight matrix for update, reset, and hidden gates

-

R[zrh] - R recurrence weight matrix for update, reset, and hidden gates

-

Wb[zrh] - W bias vectors for update, reset, and hidden gates

-

Rb[zrh] - R bias vectors for update, reset, and hidden gates

-

WB[zrh] - W parameter weight matrix for backward update, reset, and hidden gates

-

RB[zrh] - R recurrence weight matrix for backward update, reset, and hidden gates

-

WBb[zrh] - W bias vectors for backward update, reset, and hidden gates

-

RBb[zrh] - R bias vectors for backward update, reset, and hidden gates

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Sigmoid, g=Tanh):

-
-
    -
  • zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)

  • -
  • rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)

  • -
  • ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0

  • -
  • ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0

  • -
  • Ht = (1 - zt) (.) ht + zt (.) Ht-1

  • -
-
-

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-

Attributes

-
    -
  • -
  • -
  • -
  • -
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is -name: "direction" s: "forward" type: STRING

  • -
  • -
  • layout: The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size]. Default value is -name: "layout" i: 0 type: INT

  • -
  • linear_before_reset: When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate. Default value is -name: "linear_before_reset" i: 0 type: INT

  • -
-

Inputs

-

Between 3 and 6 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[zrh] and WB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[zrh] and RB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for the gates. Concatenation of [Wb[zrh], Rb[zrh]] and [WBb[zrh], RBb[zrh]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 6*hidden_size]. Optional: If not specified - assumed to be 0

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Outputs

-

Between 0 and 2 outputs.

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • -
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • -
-
- -
-
-
-
-
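
Example

A minimal, illustrative sketch (onnxruntime assumed installed; weights are random and only illustrate the expected shapes): a one-layer forward GRU with hidden_size=5 applied to a sequence of length 4. The weight tensors W and R are passed as constants and follow the shapes listed above.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxGRU
from onnxruntime import InferenceSession

seq_length, batch_size, input_size, hidden_size = 4, 1, 3, 5
x = np.random.randn(seq_length, batch_size, input_size).astype(np.float32)
w = np.random.randn(1, 3 * hidden_size, input_size).astype(np.float32)   # [num_directions, 3*hidden_size, input_size]
r = np.random.randn(1, 3 * hidden_size, hidden_size).astype(np.float32)  # [num_directions, 3*hidden_size, hidden_size]

node = OnnxGRU('X', w, r, hidden_size=hidden_size, op_version=14,
               output_names=['Y', 'Y_h'])
onx = node.to_onnx({'X': x})

sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
y, y_h = sess.run(None, {'X': x})
print(y.shape)    # (seq_length, num_directions, batch_size, hidden_size)
print(y_h.shape)  # (num_directions, batch_size, hidden_size)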

OnnxGRU_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGRU_1(*args, **kwargs)#
-

Version

-

Onnx name: GRU

-

This version of the operator has been available since version 1.

-

Summary

-

Computes a one-layer GRU. This operator is usually supported via some custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

z - update gate

-

r - reset gate

-

h - hidden gate

-

t - time step (t-1 means previous time step)

-

W[zrh] - W parameter weight matrix for update, reset, and hidden gates

-

R[zrh] - R recurrence weight matrix for update, reset, and hidden gates

-

Wb[zrh] - W bias vectors for update, reset, and hidden gates

-

Rb[zrh] - R bias vectors for update, reset, and hidden gates

-

WB[zrh] - W parameter weight matrix for backward update, reset, and hidden gates

-

RB[zrh] - R recurrence weight matrix for backward update, reset, and hidden gates

-

WBb[zrh] - W bias vectors for backward update, reset, and hidden gates

-

RBb[zrh] - R bias vectors for backward update, reset, and hidden gates

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Sigmoid, g=Tanh):

-
-
    -
  • zt = f(Xt*(Wz^T) + Ht-1*Rz + Wbz + Rbz)

  • -
  • rt = f(Xt*(Wr^T) + Ht-1*Rr + Wbr + Rbr)

  • -
  • ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*Rh + Rbh + Wbh) # default, when linear_before_reset = 0

  • -
  • ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*Rh + Rbh)) + Wbh) # when linear_before_reset != 0

  • -
  • Ht = (1 - zt) (.) ht + zt (.) Ht-1

  • -
-
-

Attributes

-
    -
  • -
  • -
  • -
  • -
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is name: "direction" s: "forward" type: STRING

  • -
  • -
  • output_sequence: The sequence output for the hidden is optional if 0. Default 0. Default value is -name: "output_sequence" i: 0 type: INT

  • -
-

Inputs

-

Between 3 and 6 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[zrh] and WB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[zrh] and RB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for the gates. Concatenation of [Wb[zrh], Rb[zrh]] and [WBb[zrh], RBb[zrh]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 6*hidden_size]. Optional: If not specified - assumed to be 0

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Outputs

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size]. It is optional if output_sequence is 0.

  • -
  • Y_h (heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • -
-
- -
-
-
-
-

OnnxGRU_14#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGRU_14(*args, **kwargs)#
-

Version

-

Onnx name: GRU

-

This version of the operator has been available since version 14.

-

Summary

-

Computes a one-layer GRU. This operator is usually supported via some custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

z - update gate

-

r - reset gate

-

h - hidden gate

-

t - time step (t-1 means previous time step)

-

W[zrh] - W parameter weight matrix for update, reset, and hidden gates

-

R[zrh] - R recurrence weight matrix for update, reset, and hidden gates

-

Wb[zrh] - W bias vectors for update, reset, and hidden gates

-

Rb[zrh] - R bias vectors for update, reset, and hidden gates

-

WB[zrh] - W parameter weight matrix for backward update, reset, and hidden gates

-

RB[zrh] - R recurrence weight matrix for backward update, reset, and hidden gates

-

WBb[zrh] - W bias vectors for backward update, reset, and hidden gates

-

RBb[zrh] - R bias vectors for backward update, reset, and hidden gates

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Sigmoid, g=Tanh):

-
-
    -
  • zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)

  • -
  • rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)

  • -
  • ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0

  • -
  • ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0

  • -
  • Ht = (1 - zt) (.) ht + zt (.) Ht-1

  • -
-
-

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-

Attributes

-
    -
  • -
  • -
  • -
  • -
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is -name: "direction" s: "forward" type: STRING

  • -
  • -
  • layout: The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size]. Default value is -name: "layout" i: 0 type: INT

  • -
  • linear_before_reset: When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate. Default value is -name: "linear_before_reset" i: 0 type: INT

  • -
-

Inputs

-

Between 3 and 6 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[zrh] and WB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[zrh] and RB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for the gates. Concatenation of [Wb[zrh], Rb[zrh]] and [WBb[zrh], RBb[zrh]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 6*hidden_size]. Optional: If not specified - assumed to be 0

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Outputs

-

Between 0 and 2 outputs.

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • -
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • -
-
- -
-
-
-
-

OnnxGRU_3#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGRU_3(*args, **kwargs)#
-

Version

-

Onnx name: GRU

-

This version of the operator has been available since version 3.

-

Summary

-

Computes a one-layer GRU. This operator is usually supported via some custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

z - update gate

-

r - reset gate

-

h - hidden gate

-

t - time step (t-1 means previous time step)

-

W[zrh] - W parameter weight matrix for update, reset, and hidden gates

-

R[zrh] - R recurrence weight matrix for update, reset, and hidden gates

-

Wb[zrh] - W bias vectors for update, reset, and hidden gates

-

Rb[zrh] - R bias vectors for update, reset, and hidden gates

-

WB[zrh] - W parameter weight matrix for backward update, reset, and hidden gates

-

RB[zrh] - R recurrence weight matrix for backward update, reset, and hidden gates

-

WBb[zrh] - W bias vectors for backward update, reset, and hidden gates

-

RBb[zrh] - R bias vectors for backward update, reset, and hidden gates

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Sigmoid, g=Tanh):

-
-
    -
  • zt = f(Xt*(Wz^T) + Ht-1*Rz + Wbz + Rbz)

  • -
  • rt = f(Xt*(Wr^T) + Ht-1*Rr + Wbr + Rbr)

  • -
  • ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*Rh + Rbh + Wbh) # default, when linear_before_reset = 0

  • -
  • ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*Rh + Rbh)) + Wbh) # when linear_before_reset != 0

  • -
  • Ht = (1 - zt) (.) ht + zt (.) Ht-1

  • -
-
-

Attributes

-
    -
  • -
  • -
  • -
  • -
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is -name: "direction" s: "forward" type: STRING

  • -
  • -
  • linear_before_reset: When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate. Default value is -name: "linear_before_reset" i: 0 type: INT

  • -
  • output_sequence: The sequence output for the hidden is optional if 0. Default 0. Default value is -name: "output_sequence" i: 0 type: INT

  • -
-

Inputs

-

Between 3 and 6 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[zrh] and WB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[zrh] and RB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for the gates. Concatenation of [Wb[zrh], Rb[zrh]] and [WBb[zrh], RBb[zrh]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 6*hidden_size]. Optional: If not specified - assumed to be 0

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Outputs

-

Between 0 and 2 outputs.

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size]. It is optional if output_sequence is 0.

  • -
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • -
-
OnnxGRU_7#

class skl2onnx.algebra.onnx_ops.OnnxGRU_7(*args, **kwargs)#

Version

-

Onnx name: GRU

-

This version of the operator has been available since -version 7.

-

Summary

-

Computes a one-layer GRU. This operator is usually supported via some custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

z - update gate

-

r - reset gate

-

h - hidden gate

-

t - time step (t-1 means previous time step)

-

W[zrh] - W parameter weight matrix for update, reset, and hidden gates

-

R[zrh] - R recurrence weight matrix for update, reset, and hidden gates

-

Wb[zrh] - W bias vectors for update, reset, and hidden gates

-

Rb[zrh] - R bias vectors for update, reset, and hidden gates

-

WB[zrh] - W parameter weight matrix for backward update, reset, and hidden gates

-

RB[zrh] - R recurrence weight matrix for backward update, reset, and hidden gates

-

WBb[zrh] - W bias vectors for backward update, reset, and hidden gates

-

RBb[zrh] - R bias vectors for backward update, reset, and hidden gates

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Sigmoid, g=Tanh):

-
-
    -
  • zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)

  • -
  • rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)

  • -
  • ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0

  • -
  • ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0

  • -
  • Ht = (1 - zt) (.) ht + zt (.) Ht-1

  • -
-
-

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-
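The equations above translate directly into NumPy. The following is a minimal sketch, assuming a single forward direction, the default activations f=Sigmoid and g=Tanh, and linear_before_reset=0; gru_forward is an illustrative helper, not part of skl2onnx, and the shapes follow the Inputs/Outputs description below.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_forward(X, W, R, B, initial_h):
    # X: [seq_length, batch_size, input_size]
    # W: [1, 3*hidden_size, input_size], R: [1, 3*hidden_size, hidden_size]
    # B: [1, 6*hidden_size], initial_h: [1, batch_size, hidden_size]
    seq_length, batch_size, _ = X.shape
    hidden_size = R.shape[-1]
    Wz, Wr, Wh = np.split(W[0], 3, axis=0)
    Rz, Rr, Rh = np.split(R[0], 3, axis=0)
    Wbz, Wbr, Wbh, Rbz, Rbr, Rbh = np.split(B[0], 6)
    H = initial_h[0]
    Y = np.empty((seq_length, 1, batch_size, hidden_size), dtype=X.dtype)
    for t in range(seq_length):
        Xt = X[t]
        zt = sigmoid(Xt @ Wz.T + H @ Rz.T + Wbz + Rbz)
        rt = sigmoid(Xt @ Wr.T + H @ Rr.T + Wbr + Rbr)
        ht = np.tanh(Xt @ Wh.T + (rt * H) @ Rh.T + Rbh + Wbh)
        H = (1 - zt) * ht + zt * H
        Y[t, 0] = H
    return Y, H[None]  # Y and Y_h

With random inputs of the shapes listed below, the result should match what a runtime such as onnxruntime computes for a GRU node with default attributes.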

Attributes

-
    -
  • -
  • -
  • -
  • -
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is -name: "direction" s: "forward" type: STRING

  • -
  • -
  • linear_before_reset: When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate. Default value is -name: "linear_before_reset" i: 0 type: INT

  • -
-

Inputs

-

Between 3 and 6 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[zrh] and WB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[zrh] and RB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for the gates. Concatenation of [Wb[zrh], Rb[zrh]] and [WBb[zrh], RBb[zrh]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 6*hidden_size]. Optional: If not specified - assumed to be 0

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Outputs

-

Between 0 and 2 outputs.

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • -
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • -
-
OnnxGather#

class skl2onnx.algebra.onnx_ops.OnnxGather(*args, **kwargs)#

Version

-

Onnx name: Gather

-

This version of the operator has been available since -version 13.

-

Summary

-

Given a data tensor of rank r >= 1 and an indices tensor of rank q, gather entries of the axis dimension of data (by default the outermost one, axis=0) indexed by indices, and concatenate them into an output tensor of rank q + (r - 1).

-

axis = 0 :

-

Let -k = indices[i_{0}, …, i_{q-1}] -Then -output[i_{0}, …, i_{q-1}, j_{0}, …, j_{r-2}] = input[k , j_{0}, …, j_{r-2}]

-
data = [
-    [1.0, 1.2],
-    [2.3, 3.4],
-    [4.5, 5.7],
-]
-indices = [
-    [0, 1],
-    [1, 2],
-]
-output = [
-    [
-        [1.0, 1.2],
-        [2.3, 3.4],
-    ],
-    [
-        [2.3, 3.4],
-        [4.5, 5.7],
-    ],
-]
-
-
-

axis = 1 :

-

Let -k = indices[i_{0}, …, i_{q-1}] -Then -output[i_{0}, …, i_{q-1}, j_{0}, …, j_{r-2}] = input[j_{0}, k, j_{1}, …, j_{r-2}]

-
data = [
-    [1.0, 1.2, 1.9],
-    [2.3, 3.4, 3.9],
-    [4.5, 5.7, 5.9],
-]
-indices = [
-    [0, 2],
-]
-axis = 1,
-output = [
-        [[1.0, 1.9]],
-        [[2.3, 3.9]],
-        [[4.5, 5.9]],
-]
-
-
-
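For a quick sanity check, Gather behaves like numpy.take along the chosen axis; the sketch below mirrors the rank rule q + (r - 1) and reproduces the axis = 1 example above (the arrays are the ones from that example):

import numpy as np

data = np.array([[1.0, 1.2, 1.9],
                 [2.3, 3.4, 3.9],
                 [4.5, 5.7, 5.9]], dtype=np.float32)

# axis = 0: whole rows are selected, output rank is q + (r - 1)
print(np.take(data, np.array([[0, 1], [1, 2]]), axis=0).shape)  # (2, 2, 3)

# axis = 1: reproduces the second example above, shape (3, 1, 2)
print(np.take(data, np.array([[0, 2]]), axis=1))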

Attributes

-
    -
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of any rank q. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank q + (r - 1).

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • -
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • -
-
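A minimal usage sketch for this wrapper class, assuming the usual skl2onnx algebra workflow (build the node, call to_onnx, run the graph with onnxruntime); the variable names and target opset are illustrative, not prescribed by the API:

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxGather

data = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]], dtype=np.float32)
indices = np.array([[0, 1], [1, 2]], dtype=np.int64)

# Build a single Gather node and convert it into an ONNX graph.
node = OnnxGather('data', 'indices', axis=0, op_version=13,
                  output_names=['output'])
onx = node.to_onnx({'data': data, 'indices': indices})

# Run it to reproduce the axis = 0 example above.
import onnxruntime as rt
sess = rt.InferenceSession(onx.SerializeToString(),
                           providers=['CPUExecutionProvider'])
print(sess.run(None, {'data': data, 'indices': indices})[0])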
OnnxGatherElements#

class skl2onnx.algebra.onnx_ops.OnnxGatherElements(*args, **kwargs)#

Version

-

Onnx name: GatherElements

-

This version of the operator has been available since -version 13.

-

Summary

-

GatherElements takes two inputs data and indices of the same rank r >= 1 and an optional attribute axis that identifies an axis of data (by default, the outer-most axis, that is axis 0). It is an indexing operation that produces its output by indexing into the input data tensor at index positions determined by elements of the indices tensor. Its output shape is the same as the shape of indices and consists of one value (gathered from the data) for each element in indices.

-

For instance, in the 3-D case (r = 3), the output produced is determined -by the following equations:

-
out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,
-out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,
-out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,
-
-
-

This operator is also the inverse of ScatterElements. It is similar to Torch’s gather operation.

-

Example 1:

-
data = [
-    [1, 2],
-    [3, 4],
-]
-indices = [
-    [0, 0],
-    [1, 0],
-]
-axis = 1
-output = [
-    [1, 1],
-    [4, 3],
-]
-
-
-

Example 2:

-
data = [
-    [1, 2, 3],
-    [4, 5, 6],
-    [7, 8, 9],
-]
-indices = [
-    [1, 2, 0],
-    [2, 0, 0],
-]
-axis = 0
-output = [
-    [4, 8, 3],
-    [7, 2, 3],
-]
-
-
-
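As a reference check, GatherElements coincides with numpy.take_along_axis on the chosen axis; the sketch below reproduces Example 1 (it is not the converter code):

import numpy as np

data = np.array([[1, 2],
                 [3, 4]])
indices = np.array([[0, 0],
                    [1, 0]])

# Example 1: gather along axis = 1.
print(np.take_along_axis(data, indices, axis=1))
# [[1 1]
#  [4 3]]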

Attributes

-
    -
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, with the same rank r as the input. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of the same shape as indices.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • -
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • -
-
OnnxGatherElements_11#

class skl2onnx.algebra.onnx_ops.OnnxGatherElements_11(*args, **kwargs)#

Version

-

Onnx name: GatherElements

-

This version of the operator has been available since -version 11.

-

Summary

-

GatherElements takes two inputs data and indices of the same rank r >= 1 -and an optional attribute axis that identifies an axis of data -(by default, the outer-most axis, that is axis 0). It is an indexing operation -that produces its output by indexing into the input data tensor at index -positions determined by elements of the indices tensor. -Its output shape is the same as the shape of indices and consists of one value -(gathered from the data) for each element in indices.

-

For instance, in the 3-D case (r = 3), the output produced is determined -by the following equations:

-
out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,
-out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,
-out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,
-
-
-

This operator is also the inverse of ScatterElements. It is similar to Torch’s gather operation.

-

Example 1:

-
data = [
-    [1, 2],
-    [3, 4],
-]
-indices = [
-    [0, 0],
-    [1, 0],
-]
-axis = 1
-output = [
-    [
-      [1, 1],
-      [4, 3],
-    ],
-]
-
-
-

Example 2:

-
data = [
-    [1, 2, 3],
-    [4, 5, 6],
-    [7, 8, 9],
-]
-indices = [
-    [1, 2, 0],
-    [2, 0, 0],
-]
-axis = 0
-output = [
-    [
-      [4, 8, 3],
-      [7, 2, 3],
-    ],
-]
-
-
-

Attributes

-
    -
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, with the same rank r as the input. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of the same shape as indices.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • -
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • -
-
OnnxGatherElements_13#

class skl2onnx.algebra.onnx_ops.OnnxGatherElements_13(*args, **kwargs)#

Version

-

Onnx name: GatherElements

-

This version of the operator has been available since -version 13.

-

Summary

-

GatherElements takes two inputs data and indices of the same rank r >= 1 -and an optional attribute axis that identifies an axis of data -(by default, the outer-most axis, that is axis 0). It is an indexing operation -that produces its output by indexing into the input data tensor at index -positions determined by elements of the indices tensor. -Its output shape is the same as the shape of indices and consists of one value -(gathered from the data) for each element in indices.

-

For instance, in the 3-D case (r = 3), the output produced is determined -by the following equations:

-
out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,
-out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,
-out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,
-
-
-

This operator is also the inverse of ScatterElements. It is similar to Torch’s gather operation.

-

Example 1:

-
data = [
-    [1, 2],
-    [3, 4],
-]
-indices = [
-    [0, 0],
-    [1, 0],
-]
-axis = 1
-output = [
-    [1, 1],
-    [4, 3],
-]
-
-
-

Example 2:

-
data = [
-    [1, 2, 3],
-    [4, 5, 6],
-    [7, 8, 9],
-]
-indices = [
-    [1, 2, 0],
-    [2, 0, 0],
-]
-axis = 0
-output = [
-    [4, 8, 3],
-    [7, 2, 3],
-]
-
-
-

Attributes

-
    -
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, with the same rank r as the input. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of the same shape as indices.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • -
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • -
-
OnnxGatherND#

class skl2onnx.algebra.onnx_ops.OnnxGatherND(*args, **kwargs)#

Version

-

Onnx name: GatherND

-

This version of the operator has been available since -version 13.

-

Summary

-

Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims integer b, this operator gathers slices of data into an output tensor of rank q + r - indices_shape[-1] - 1 - b.

indices is a q-dimensional integer tensor, best thought of as a (q-1)-dimensional tensor of index-tuples into data, where each element defines a slice of data.

batch_dims (denoted as b) is an integer indicating the number of batch dimensions, i.e. the leading b dimensions of the data tensor and indices represent the batches, and the gather starts from dimension b+1.

-

Some salient points about the inputs’ rank and shape:

-
    -
  1. r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks r and q

  2. -
  3. The first b dimensions of the shape of indices tensor and data tensor must be equal.

  4. -
  5. b < min(q, r) is to be honored.

  6. -
  7. The indices_shape[-1] should have a value between 1 (inclusive) and rank r-b (inclusive)

  8. -
  9. All values in indices are expected to be within bounds [-s, s-1] along axis of size s (i.e.) -data_shape[i] <= indices[…,i] <= data_shape[i] - 1. -It is an error if any of the index values are out of bounds.

  10. -
-

The output is computed as follows:

-

The output tensor is obtained by mapping each index-tuple in the indices tensor to the corresponding slice of the input data.

-
    -
  1. If indices_shape[-1] > r-b => error condition

  2. -
  3. If indices_shape[-1] == r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensors -containing 1-D tensors of dimension r-b, where N is an integer equals to the product of 1 and all the elements in the batch dimensions -of the indices_shape. Let us think of each such r-b ranked tensor as indices_slice. Each scalar value corresponding to data[0:b-1,indices_slice] -is filled into the corresponding location of the (q-b-1)-dimensional tensor to form the output tensor (Example 1 below)

  4. -
  5. If indices_shape[-1] < r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensor -containing 1-D tensors of dimension < r-b. Let us think of each such tensors as indices_slice. Each tensor slice corresponding -to data[0:b-1, indices_slice , :] is filled into the corresponding location of the (q-b-1)-dimensional tensor -to form the output tensor (Examples 2, 3, 4 and 5 below)

  6. -
-

This operator is the inverse of ScatterND.

-

Example 1

-
-

batch_dims = 0

-

data = [[0,1],[2,3]] # data_shape = [2, 2]

-

indices = [[0,0],[1,1]] # indices_shape = [2, 2]

-

output = [0,3] # output_shape = [2]

-
-

Example 2

-
-

batch_dims = 0

-

data = [[0,1],[2,3]] # data_shape = [2, 2]

-

indices = [[1],[0]] # indices_shape = [2, 1]

-

output = [[2,3],[0,1]] # output_shape = [2, 2]

-
-

Example 3

-
-

batch_dims = 0

-

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

-

indices = [[0,1],[1,0]] # indices_shape = [2, 2]

-

output = [[2,3],[4,5]] # output_shape = [2, 2]

-
-

Example 4

-
-

batch_dims = 0

-

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

-

indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]

-

output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2]

-
-

Example 5

-
-

batch_dims = 1

-

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

-

indices = [[1],[0]] # indices_shape = [2, 1]

-

output = [[2,3],[4,5]] # output_shape = [2, 2]

-
-
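The mapping described above can be written as a short NumPy reference; this sketch only covers the batch_dims = 0 case, and gather_nd is an illustrative helper rather than a skl2onnx function:

import numpy as np

def gather_nd(data, indices):
    # Each index-tuple along the last axis of `indices` selects a slice of
    # `data`; the remaining axes of `data` are appended to the output.
    out_shape = indices.shape[:-1] + data.shape[indices.shape[-1]:]
    flat = indices.reshape(-1, indices.shape[-1])
    gathered = np.stack([data[tuple(idx)] for idx in flat])
    return gathered.reshape(out_shape)

data = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
print(gather_nd(data, np.array([[0, 1], [1, 0]])))      # Example 3: [[2 3] [4 5]]
print(gather_nd(data, np.array([[[0, 1]], [[1, 0]]])))  # Example 4: [[[2 3]] [[4 5]]]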

Attributes

-
    -
  • batch_dims: The number of batch dimensions. The gather of indexing starts from dimension of data[batch_dims:] Default value is -name: "batch_dims" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • -
-
OnnxGatherND_11#

class skl2onnx.algebra.onnx_ops.OnnxGatherND_11(*args, **kwargs)#

Version

-

Onnx name: GatherND

-

This version of the operator has been available since -version 11.

-

Summary

-

Given data tensor of rank r >= 1, and indices tensor of rank q >= 1, this operator gathers -slices of data into an output tensor of rank q + r - indices_shape[-1] - 1.

-

indices is an q-dimensional integer tensor, best thought of as a (q-1)-dimensional tensor of index-tuples into data, -where each element defines a slice of data

-

Some salient points about the inputs’ rank and shape:

-
    -
  1. r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks r and q

  2. -
  3. The indices_shape[-1] should have a value between 1 (inclusive) and rank r (inclusive)

  4. -
  5. All values in indices are expected to be within bounds [-s, s-1] along axis of size s (i.e.) -data_shape[i] <= indices[…,i] <= data_shape[i] - 1. -It is an error if any of the index values are out of bounds.

  6. -
-

The output is computed as follows:

-

The output tensor is obtained by mapping each index-tuple in the indices tensor to the corresponding slice of the input data.

-
    -
  1. If indices_shape[-1] > r => error condition

  2. -
  3. If indices_shape[-1] == r, since the rank of indices is q, indices can be thought of as a (q-1)-dimensional tensor -containing 1-D tensors of dimension r. Let us think of each such r ranked tensor as indices_slice. -Each scalar value corresponding to data[indices_slice] is filled into the corresponding location of the (q-1)-dimensional tensor -to form the output tensor (Example 1 below)

  4. -
  5. If indices_shape[-1] < r, since the rank of indices is q, indices can be thought of as a (q-1)-dimensional tensor -containing 1-D tensors of dimension < r. Let us think of each such tensors as indices_slice. -Each tensor slice corresponding to data[indices_slice , :] is filled into the corresponding location of the (q-1)-dimensional tensor -to form the output tensor (Examples 2, 3, and 4 below)

  6. -
-

This operator is the inverse of ScatterND.

-

Example 1

-
-

data = [[0,1],[2,3]] # data_shape = [2, 2]

-

indices = [[0,0],[1,1]] # indices_shape = [2, 2]

-

output = [0,3] # output_shape = [2]

-
-

Example 2

-
-

data = [[0,1],[2,3]] # data_shape = [2, 2]

-

indices = [[1],[0]] # indices_shape = [2, 1]

-

output = [[2,3],[0,1]] # output_shape = [2, 2]

-
-

Example 3

-
-

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

-

indices = [[0,1],[1,0]] # indices_shape = [2, 2]

-

output = [[2,3],[4,5]] # output_shape = [2, 2]

-
-

Example 4

-
-

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

-

indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]

-

output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2]

-
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • -
-
OnnxGatherND_12#

class skl2onnx.algebra.onnx_ops.OnnxGatherND_12(*args, **kwargs)#

Version

-

Onnx name: GatherND

-

This version of the operator has been available since -version 12.

-

Summary

-

Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims integer b, this operator gathers -slices of data into an output tensor of rank q + r - indices_shape[-1] - 1 - b.

-

indices is an q-dimensional integer tensor, best thought of as a (q-1)-dimensional tensor of index-tuples into data, -where each element defines a slice of data

-

batch_dims (denoted as b) is an integer indicating the number of batch dimensions, i.e the leading b number of dimensions of -data tensor and indices are representing the batches, and the gather starts from the b+1 dimension.

-

Some salient points about the inputs’ rank and shape:

-
    -
  1. r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks r and q

  2. -
  3. The first b dimensions of the shape of indices tensor and data tensor must be equal.

  4. -
  5. b < min(q, r) is to be honored.

  6. -
  7. The indices_shape[-1] should have a value between 1 (inclusive) and rank r-b (inclusive)

  8. -
  9. All values in indices are expected to be within bounds [-s, s-1] along axis of size s (i.e.) -data_shape[i] <= indices[…,i] <= data_shape[i] - 1. -It is an error if any of the index values are out of bounds.

  10. -
-

The output is computed as follows:

-

The output tensor is obtained by mapping each index-tuple in the indices tensor to the corresponding slice of the input data.

-
    -
  1. If indices_shape[-1] > r-b => error condition

  2. -
  3. If indices_shape[-1] == r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensors -containing 1-D tensors of dimension r-b, where N is an integer equals to the product of 1 and all the elements in the batch dimensions -of the indices_shape. Let us think of each such r-b ranked tensor as indices_slice. Each scalar value corresponding to data[0:b-1,indices_slice] -is filled into the corresponding location of the (q-b-1)-dimensional tensor to form the output tensor (Example 1 below)

  4. -
  5. If indices_shape[-1] < r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensor -containing 1-D tensors of dimension < r-b. Let us think of each such tensors as indices_slice. Each tensor slice corresponding -to data[0:b-1, indices_slice , :] is filled into the corresponding location of the (q-b-1)-dimensional tensor -to form the output tensor (Examples 2, 3, 4 and 5 below)

  6. -
-

This operator is the inverse of ScatterND.

-

Example 1

-
-

batch_dims = 0

-

data = [[0,1],[2,3]] # data_shape = [2, 2]

-

indices = [[0,0],[1,1]] # indices_shape = [2, 2]

-

output = [0,3] # output_shape = [2]

-
-

Example 2

-
-

batch_dims = 0

-

data = [[0,1],[2,3]] # data_shape = [2, 2]

-

indices = [[1],[0]] # indices_shape = [2, 1]

-

output = [[2,3],[0,1]] # output_shape = [2, 2]

-
-

Example 3

-
-

batch_dims = 0

-

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

-

indices = [[0,1],[1,0]] # indices_shape = [2, 2]

-

output = [[2,3],[4,5]] # output_shape = [2, 2]

-
-

Example 4

-
-

batch_dims = 0

-

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

-

indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]

-

output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2]

-
-

Example 5

-
-

batch_dims = 1

-

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

-

indices = [[1],[0]] # indices_shape = [2, 1]

-

output = [[2,3],[4,5]] # output_shape = [2, 2]

-
-

Attributes

-
    -
  • batch_dims: The number of batch dimensions. The gather of indexing starts from dimension of data[batch_dims:] Default value is -name: "batch_dims" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • -
-
OnnxGatherND_13#

class skl2onnx.algebra.onnx_ops.OnnxGatherND_13(*args, **kwargs)#

Version

-

Onnx name: GatherND

-

This version of the operator has been available since -version 13.

-

Summary

-

Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims integer b, this operator gathers -slices of data into an output tensor of rank q + r - indices_shape[-1] - 1 - b.

-

indices is an q-dimensional integer tensor, best thought of as a (q-1)-dimensional tensor of index-tuples into data, -where each element defines a slice of data

-

batch_dims (denoted as b) is an integer indicating the number of batch dimensions, i.e the leading b number of dimensions of -data tensor and indices are representing the batches, and the gather starts from the b+1 dimension.

-

Some salient points about the inputs’ rank and shape:

-
    -
  1. r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks r and q

  2. -
  3. The first b dimensions of the shape of indices tensor and data tensor must be equal.

  4. -
  5. b < min(q, r) is to be honored.

  6. -
  7. The indices_shape[-1] should have a value between 1 (inclusive) and rank r-b (inclusive)

  8. -
  9. All values in indices are expected to be within bounds [-s, s-1] along axis of size s (i.e.) -data_shape[i] <= indices[…,i] <= data_shape[i] - 1. -It is an error if any of the index values are out of bounds.

  10. -
-

The output is computed as follows:

-

The output tensor is obtained by mapping each index-tuple in the indices tensor to the corresponding slice of the input data.

-
    -
  1. If indices_shape[-1] > r-b => error condition

  2. -
  3. If indices_shape[-1] == r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensors -containing 1-D tensors of dimension r-b, where N is an integer equals to the product of 1 and all the elements in the batch dimensions -of the indices_shape. Let us think of each such r-b ranked tensor as indices_slice. Each scalar value corresponding to data[0:b-1,indices_slice] -is filled into the corresponding location of the (q-b-1)-dimensional tensor to form the output tensor (Example 1 below)

  4. -
  5. If indices_shape[-1] < r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensor -containing 1-D tensors of dimension < r-b. Let us think of each such tensors as indices_slice. Each tensor slice corresponding -to data[0:b-1, indices_slice , :] is filled into the corresponding location of the (q-b-1)-dimensional tensor -to form the output tensor (Examples 2, 3, 4 and 5 below)

  6. -
-

This operator is the inverse of ScatterND.

-

Example 1

-
-

batch_dims = 0

-

data = [[0,1],[2,3]] # data_shape = [2, 2]

-

indices = [[0,0],[1,1]] # indices_shape = [2, 2]

-

output = [0,3] # output_shape = [2]

-
-

Example 2

-
-

batch_dims = 0

-

data = [[0,1],[2,3]] # data_shape = [2, 2]

-

indices = [[1],[0]] # indices_shape = [2, 1]

-

output = [[2,3],[0,1]] # output_shape = [2, 2]

-
-

Example 3

-
-

batch_dims = 0

-

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

-

indices = [[0,1],[1,0]] # indices_shape = [2, 2]

-

output = [[2,3],[4,5]] # output_shape = [2, 2]

-
-

Example 4

-
-

batch_dims = 0

-

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

-

indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]

-

output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2]

-
-

Example 5

-
-

batch_dims = 1

-

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

-

indices = [[1],[0]] # indices_shape = [2, 1]

-

output = [[2,3],[4,5]] # output_shape = [2, 2]

-
-

Attributes

-
    -
  • batch_dims: The number of batch dimensions. The gather of indexing starts from dimension of data[batch_dims:] Default value is -name: "batch_dims" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • -
-
OnnxGather_1#

class skl2onnx.algebra.onnx_ops.OnnxGather_1(*args, **kwargs)#

Version

-

Onnx name: Gather

-

This version of the operator has been available since -version 1.

-

Summary

-

Given data tensor of rank r >= 1, and indices tensor of rank q, gather -entries of the axis dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates -them in an output tensor of rank q + (r - 1). -Example 1:

-
data = [
-    [1.0, 1.2],
-    [2.3, 3.4],
-    [4.5, 5.7],
-]
-indices = [
-    [0, 1],
-    [1, 2],
-]
-output = [
-    [
-        [1.0, 1.2],
-        [2.3, 3.4],
-    ],
-    [
-        [2.3, 3.4],
-        [4.5, 5.7],
-    ],
-]
-
-
-

Example 2:

-
data = [
-    [1.0, 1.2, 1.9],
-    [2.3, 3.4, 3.9],
-    [4.5, 5.7, 5.9],
-]
-indices = [
-    [0, 2],
-]
-axis = 1,
-output = [
-    [
-        [1.0, 1.9],
-        [2.3, 3.9],
-        [4.5, 5.9],
-    ],
-]
-
-
-

Attributes

-
    -
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] Default value is -name: "axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of any rank q. All index values are expected to be within bounds. It is an error if any of the index values are out of bounds.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank q + (r - 1).

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • -
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • -
-
OnnxGather_11#

class skl2onnx.algebra.onnx_ops.OnnxGather_11(*args, **kwargs)#

Version

-

Onnx name: Gather

-

This version of the operator has been available since -version 11.

-

Summary

-

Given data tensor of rank r >= 1, and indices tensor of rank q, gather -entries of the axis dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates -them in an output tensor of rank q + (r - 1).

-

axis = 0 :

-

Let -k = indices[i_{0}, …, i_{q-1}] -Then -output[i_{0}, …, i_{q-1}, j_{0}, …, j_{r-2}] = input[k , j_{0}, …, j_{r-2}]

-
data = [
-    [1.0, 1.2],
-    [2.3, 3.4],
-    [4.5, 5.7],
-]
-indices = [
-    [0, 1],
-    [1, 2],
-]
-output = [
-    [
-        [1.0, 1.2],
-        [2.3, 3.4],
-    ],
-    [
-        [2.3, 3.4],
-        [4.5, 5.7],
-    ],
-]
-
-
-

axis = 1 :

-

Let -k = indices[i_{0}, …, i_{q-1}] -Then -output[i_{0}, …, i_{q-1}, j_{0}, …, j_{r-2}] = input[j_{0}, k, j_{1}, …, j_{r-2}]

-
data = [
-    [1.0, 1.2, 1.9],
-    [2.3, 3.4, 3.9],
-    [4.5, 5.7, 5.9],
-]
-indices = [
-    [0, 2],
-]
-axis = 1,
-output = [
-    [
-        [1.0, 1.9],
-        [2.3, 3.9],
-        [4.5, 5.9],
-    ],
-]
-
-
-

Attributes

-
    -
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of any rank q. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank q + (r - 1).

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • -
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • -
-
OnnxGather_13#

class skl2onnx.algebra.onnx_ops.OnnxGather_13(*args, **kwargs)#

Version

-

Onnx name: Gather

-

This version of the operator has been available since -version 13.

-

Summary

-

Given data tensor of rank r >= 1, and indices tensor of rank q, gather -entries of the axis dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates -them in an output tensor of rank q + (r - 1).

-

axis = 0 :

-

Let -k = indices[i_{0}, …, i_{q-1}] -Then -output[i_{0}, …, i_{q-1}, j_{0}, …, j_{r-2}] = input[k , j_{0}, …, j_{r-2}]

-
data = [
-    [1.0, 1.2],
-    [2.3, 3.4],
-    [4.5, 5.7],
-]
-indices = [
-    [0, 1],
-    [1, 2],
-]
-output = [
-    [
-        [1.0, 1.2],
-        [2.3, 3.4],
-    ],
-    [
-        [2.3, 3.4],
-        [4.5, 5.7],
-    ],
-]
-
-
-

axis = 1 :

-

Let -k = indices[i_{0}, …, i_{q-1}] -Then -output[i_{0}, …, i_{q-1}, j_{0}, …, j_{r-2}] = input[j_{0}, k, j_{1}, …, j_{r-2}]

-
data = [
-    [1.0, 1.2, 1.9],
-    [2.3, 3.4, 3.9],
-    [4.5, 5.7, 5.9],
-]
-indices = [
-    [0, 2],
-]
-axis = 1,
-output = [
-        [[1.0, 1.9]],
-        [[2.3, 3.9]],
-        [[4.5, 5.9]],
-]
-
-
-

Attributes

-
    -
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of any rank q. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank q + (r - 1).

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • -
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • -
-
OnnxGemm#

class skl2onnx.algebra.onnx_ops.OnnxGemm(*args, **kwargs)#

Version

-

Onnx name: Gemm

-

This version of the operator has been available since -version 13.

-

Summary

-

General Matrix multiplication: -https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3

-

A’ = transpose(A) if transA else A

-

B’ = transpose(B) if transB else B

-

Compute Y = alpha * A’ * B’ + beta * C, where input tensor A has shape (M, K) or (K, M), input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N). A will be transposed before doing the computation if attribute transA is non-zero, same for B and transB. This operator supports unidirectional broadcasting (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check Broadcasting in ONNX. This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-
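The formula reads directly as NumPy; the sketch below mirrors it (gemm is an illustrative helper, not part of skl2onnx):

import numpy as np

def gemm(A, B, C=None, alpha=1.0, beta=1.0, transA=0, transB=0):
    A = A.T if transA else A
    B = B.T if transB else B
    Y = alpha * (A @ B)
    if C is not None:
        Y = Y + beta * C  # C broadcasts unidirectionally to (M, N)
    return Y

A = np.random.rand(3, 4).astype(np.float32)  # (M, K)
B = np.random.rand(4, 5).astype(np.float32)  # (K, N)
C = np.random.rand(5).astype(np.float32)     # broadcastable to (M, N)
print(gemm(A, B, C, alpha=0.5, beta=2.0).shape)  # (3, 5)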

Attributes

-
    -
  • alpha: Scalar multiplier for the product of input tensors A * B. Default value is -name: "alpha" f: 1.0 type: FLOAT

  • -
  • beta: Scalar multiplier for input tensor C. Default value is -name: "beta" f: 1.0 type: FLOAT

  • -
  • transA: Whether A should be transposed Default value is -name: "transA" i: 0 type: INT

  • -
  • transB: Whether B should be transposed Default value is -name: "transB" i: 0 type: INT

  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • A (heterogeneous)T: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.

  • -
  • B (heterogeneous)T: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.

  • -
  • C (optional, heterogeneous)T: Optional input tensor C. If not specified, the computation is done as if C is a scalar 0. The shape of C should be unidirectional broadcastable to (M, N).

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor of shape (M, N).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(bfloat16): Constrain input and output types to float/int tensors.

  • -
-
OnnxGemm_1#

class skl2onnx.algebra.onnx_ops.OnnxGemm_1(*args, **kwargs)#

Version

-

Onnx name: Gemm

-

This version of the operator has been available since -version 1.

-

Summary

-

General Matrix multiplication: -https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 -Compute Y = alpha * A * B + beta * C, where input tensor A has -dimension (M X K), input tensor B has dimension (K X N), input tensor C and -output tensor Y have dimension (M X N). -If attribute broadcast is non-zero, input tensor C will be broadcasted to match -the dimension requirement. A will be transposed before doing the computation -if attribute transA is non-zero, same for B and transB.

-

Attributes

-
    -
  • alpha: Scalar multiplier for the product of input tensors A * B, the default value is 1.0. Default value is -name: "alpha" f: 1.0 type: FLOAT

  • -
  • beta: Scalar multiplier for input tensor C, the default value is 1.0. Default value is -name: "beta" f: 1.0 type: FLOAT

  • -
  • broadcast: Whether C should be broadcasted Default value is -name: "broadcast" i: 0 type: INT

  • -
  • transA: Whether A should be transposed Default value is -name: "transA" i: 0 type: INT

  • -
  • transB: Whether B should be transposed Default value is -name: "transB" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: Input tensor A

  • -
  • B (heterogeneous)T: Input tensor B

  • -
  • C (heterogeneous)T: Input tensor C, can be inplace.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
OnnxGemm_11#

class skl2onnx.algebra.onnx_ops.OnnxGemm_11(*args, **kwargs)#

Version

-

Onnx name: Gemm

-

This version of the operator has been available since -version 11.

-

Summary

-

General Matrix multiplication: -https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3

-

A’ = transpose(A) if transA else A

-

B’ = transpose(B) if transB else B

-

Compute Y = alpha * A’ * B’ + beta * C, where input tensor A has shape (M, K) or (K, M), -input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), -and output tensor Y has shape (M, N). A will be transposed before doing the -computation if attribute transA is non-zero, same for B and transB. -This operator supports unidirectional broadcasting (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check Broadcasting in ONNX. -This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-

Attributes

-
    -
  • alpha: Scalar multiplier for the product of input tensors A * B. Default value is -name: "alpha" f: 1.0 type: FLOAT

  • -
  • beta: Scalar multiplier for input tensor C. Default value is -name: "beta" f: 1.0 type: FLOAT

  • -
  • transA: Whether A should be transposed Default value is -name: "transA" i: 0 type: INT

  • -
  • transB: Whether B should be transposed Default value is -name: "transB" i: 0 type: INT

  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • A (heterogeneous)T: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.

  • -
  • B (heterogeneous)T: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.

  • -
  • C (optional, heterogeneous)T: Optional input tensor C. If not specified, the computation is done as if C is a scalar 0. The shape of C should be unidirectional broadcastable to (M, N).

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor of shape (M, N).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64): Constrain input and output types to float/int tensors.

  • -
-
OnnxGemm_13#

class skl2onnx.algebra.onnx_ops.OnnxGemm_13(*args, **kwargs)#

Version

-

Onnx name: Gemm

-

This version of the operator has been available since -version 13.

-

Summary

-

General Matrix multiplication: -https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3

-

A’ = transpose(A) if transA else A

-

B’ = transpose(B) if transB else B

-

Compute Y = alpha * A’ * B’ + beta * C, where input tensor A has shape (M, K) or (K, M), -input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), -and output tensor Y has shape (M, N). A will be transposed before doing the -computation if attribute transA is non-zero, same for B and transB. -This operator supports unidirectional broadcasting (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check Broadcasting in ONNX. -This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-

Attributes

-
    -
  • alpha: Scalar multiplier for the product of input tensors A * B. Default value is -name: "alpha" f: 1.0 type: FLOAT

  • -
  • beta: Scalar multiplier for input tensor C. Default value is -name: "beta" f: 1.0 type: FLOAT

  • -
  • transA: Whether A should be transposed Default value is -name: "transA" i: 0 type: INT

  • -
  • transB: Whether B should be transposed Default value is -name: "transB" i: 0 type: INT

  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • A (heterogeneous)T: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.

  • -
  • B (heterogeneous)T: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.

  • -
  • C (optional, heterogeneous)T: Optional input tensor C. If not specified, the computation is done as if C is a scalar 0. The shape of C should be unidirectional broadcastable to (M, N).

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor of shape (M, N).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(bfloat16): Constrain input and output types to float/int tensors.

  • -
-
OnnxGemm_6#

class skl2onnx.algebra.onnx_ops.OnnxGemm_6(*args, **kwargs)#

Version

-

Onnx name: Gemm

-

This version of the operator has been available since -version 6.

-

Summary

-

General Matrix multiplication: -https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 -Compute Y = alpha * A * B + beta * C, where input tensor A has -dimension (M X K), input tensor B has dimension (K X N), input tensor C and -output tensor Y have dimension (M X N). -If attribute broadcast is non-zero, input tensor C will be broadcasted to match -the dimension requirement. A will be transposed before doing the computation -if attribute transA is non-zero, same for B and transB.

-

Attributes

-
    -
  • alpha: Scalar multiplier for the product of input tensors A * B, the default value is 1.0. Default value is -name: "alpha" f: 1.0 type: FLOAT

  • -
  • beta: Scalar multiplier for input tensor C, the default value is 1.0. Default value is -name: "beta" f: 1.0 type: FLOAT

  • -
  • broadcast: Whether C should be broadcasted Default value is -name: "broadcast" i: 0 type: INT

  • -
  • transA: Whether A should be transposed Default value is -name: "transA" i: 0 type: INT

  • -
  • transB: Whether B should be transposed Default value is -name: "transB" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: Input tensor A

  • -
  • B (heterogeneous)T: Input tensor B

  • -
  • C (heterogeneous)T: Input tensor C

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
OnnxGemm_7#

class skl2onnx.algebra.onnx_ops.OnnxGemm_7(*args, **kwargs)#

Version

-

Onnx name: Gemm

-

This version of the operator has been available since -version 7.

-

Summary

-

General Matrix multiplication: -https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3

-

A’ = transpose(A) if transA else A

-

B’ = transpose(B) if transB else B

-

Compute Y = alpha * A’ * B’ + beta * C, where input tensor A has shape (M, K) or (K, M), -input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), -and output tensor Y has shape (M, N). A will be transposed before doing the -computation if attribute transA is non-zero, same for B and transB. -This operator supports unidirectional broadcasting (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check Broadcasting in ONNX.

-

Attributes

-
    -
  • alpha: Scalar multiplier for the product of input tensors A * B. Default value is -name: "alpha" f: 1.0 type: FLOAT

  • -
  • beta: Scalar multiplier for input tensor C. Default value is -name: "beta" f: 1.0 type: FLOAT

  • -
  • transA: Whether A should be transposed Default value is -name: "transA" i: 0 type: INT

  • -
  • transB: Whether B should be transposed Default value is -name: "transB" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.

  • -
  • B (heterogeneous)T: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.

  • -
  • C (heterogeneous)T: Input tensor C. The shape of C should be unidirectional broadcastable to (M, N).

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor of shape (M, N).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
OnnxGemm_9#

class skl2onnx.algebra.onnx_ops.OnnxGemm_9(*args, **kwargs)#

Version

-

Onnx name: Gemm

-

This version of the operator has been available since -version 9.

-

Summary

-

General Matrix multiplication: -https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3

-

A’ = transpose(A) if transA else A

-

B’ = transpose(B) if transB else B

-

Compute Y = alpha * A’ * B’ + beta * C, where input tensor A has shape (M, K) or (K, M), -input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), -and output tensor Y has shape (M, N). A will be transposed before doing the -computation if attribute transA is non-zero, same for B and transB. -This operator supports unidirectional broadcasting (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check Broadcasting in ONNX.

-

Attributes

-
    -
  • alpha: Scalar multiplier for the product of input tensors A * B. Default value is -name: "alpha" f: 1.0 type: FLOAT

  • -
  • beta: Scalar multiplier for input tensor C. Default value is -name: "beta" f: 1.0 type: FLOAT

  • -
  • transA: Whether A should be transposed Default value is -name: "transA" i: 0 type: INT

  • -
  • transB: Whether B should be transposed Default value is -name: "transB" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.

  • -
  • B (heterogeneous)T: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.

  • -
  • C (heterogeneous)T: Input tensor C. The shape of C should be unidirectional broadcastable to (M, N).

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor of shape (M, N).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64): Constrain input and output types to float/int tensors.

  • -
-
OnnxGlobalAveragePool#

class skl2onnx.algebra.onnx_ops.OnnxGlobalAveragePool(*args, **kwargs)#

Version

-

Onnx name: GlobalAveragePool

-

This version of the operator has been available since -version 1.

-

Summary

-

GlobalAveragePool consumes an input tensor X and applies average pooling across the values in the same channel. This is equivalent to AveragePool with kernel size equal to the spatial dimension of the input tensor.

-
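Equivalently, the operator reduces every spatial dimension to 1 while keeping N and C; a NumPy reference sketch:

import numpy as np

X = np.random.rand(2, 3, 8, 8).astype(np.float32)  # (N, C, H, W)
Y = X.mean(axis=(2, 3), keepdims=True)             # (N, C, 1, 1)
print(Y.shape)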

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
OnnxGlobalAveragePool_1#

class skl2onnx.algebra.onnx_ops.OnnxGlobalAveragePool_1(*args, **kwargs)#

Version

-

Onnx name: GlobalAveragePool

-

This version of the operator has been available since -version 1.

-

Summary

-

GlobalAveragePool consumes an input tensor X and applies average pooling across the values in the same channel. This is equivalent to AveragePool with kernel size equal to the spatial dimension of the input tensor.

-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxGlobalLpPool#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGlobalLpPool(*args, **kwargs)#
-

Version

-

Onnx name: GlobalLpPool

-

This version of the operator has been available since version 2.

-

Summary

-

GlobalLpPool consumes an input tensor X and applies Lp-norm pooling across the values in the same channel. This is equivalent to LpPool with kernel size equal to the spatial dimension of the input tensor.

-

Attributes

-
    -
  • p: p value of the Lp norm used to pool over the input data. Default value is name: "p" i: 2 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-
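As a NumPy sketch (illustrative, not from the specification), GlobalLpPool with the default p=2 reduces to an Lp norm over the spatial axes of each (batch, channel) slice:

    import numpy as np

    def global_lp_pool(X, p=2):
        # Lp norm over all spatial dimensions, kept as size-1 axes.
        spatial = tuple(range(2, X.ndim))
        return np.power(np.sum(np.abs(X) ** p, axis=spatial, keepdims=True), 1.0 / p)

    X = np.random.rand(1, 3, 4, 5).astype(np.float32)
    Y = global_lp_pool(X, p=2)   # shape (1, 3, 1, 1)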

OnnxGlobalLpPool_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGlobalLpPool_1(*args, **kwargs)#
-

Version

-

Onnx name: GlobalLpPool

-

This version of the operator has been available since -version 1.

-

Summary

-

GlobalLpPool consumes an input tensor X and applies Lp-norm pooling across the values in the same channel. This is equivalent to LpPool with kernel size equal to the spatial dimension of the input tensor.

-

Attributes

-
    -
  • p: p value of the Lp norm used to pool over the input data, default is 2.0. Default value is name: "p" f: 2.0 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimension are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. Dimensions will be N x C x 1 x 1

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxGlobalLpPool_2#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGlobalLpPool_2(*args, **kwargs)#
-

Version

-

Onnx name: GlobalLpPool

-

This version of the operator has been available since -version 2.

-

Summary

-

GlobalLpPool consumes an input tensor X and applies lp pool pooling across -the values in the same channel. This is equivalent to LpPool with kernel size -equal to the spatial dimension of input tensor.

-

Attributes

-
    -
  • p: p value of the Lp norm used to pool over the input data. Default value is name: "p" i: 2 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxGlobalMaxPool#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGlobalMaxPool(*args, **kwargs)#
-

Version

-

Onnx name: GlobalMaxPool

-

This version of the operator has been available since version 1.

-

Summary

-

GlobalMaxPool consumes an input tensor X and applies max pooling across the values in the same channel. This is equivalent to MaxPool with kernel size equal to the spatial dimension of input tensor.

-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-
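Analogously to GlobalAveragePool, a one-line NumPy sketch (illustrative, not from the specification) of what GlobalMaxPool computes:

    import numpy as np

    X = np.random.rand(2, 3, 8, 8).astype(np.float32)       # (N, C, H, W)
    Y = X.max(axis=tuple(range(2, X.ndim)), keepdims=True)  # (N, C, 1, 1)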

OnnxGlobalMaxPool_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGlobalMaxPool_1(*args, **kwargs)#
-

Version

-

Onnx name: GlobalMaxPool

-

This version of the operator has been available since -version 1.

-

Summary

-

GlobalMaxPool consumes an input tensor X and applies max pooling across -the values in the same channel. This is equivalent to MaxPool with kernel size -equal to the spatial dimension of input tensor.

-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxGradient#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGradient(*args, **kwargs)#
-

Version

-

Onnx name: Gradient

-

This version of the operator has been available since version 1 of domain ai.onnx.preview.training.

-

Summary

-

Gradient operator computes the partial derivatives of a specific tensor w.r.t. some other tensors. This operator is widely used in gradient-based training algorithms. To illustrate its use, let’s consider a computation graph:

X -----.
       |
       v
W --> Conv --> H --> Gemm --> Y
                      ^
                      |
                      Z

where W and Z are trainable tensors. Note that operators’ attributes are omitted for the sake of simplicity. Let dY/dW (dY/dZ) be the gradient of Y with respect to W (Z). The user can compute gradients by inserting a Gradient operator to form another graph, shown below.

W --> Conv --> H --> Gemm --> Y
|      ^              ^
|      |              |
|      X              Z
|      |              |
|      |   .----------'
|      |   |  (W/Z/X is the 1st/2nd/3rd input of Gradient as shown in
|      |   |   "xs" followed by "zs")
|      v   v
'---> Gradient(xs=["W", "Z"], zs=["X"], y="Y")
       |   |
       |   '-----------------------------------> dY/dW (1st output of Gradient)
       |
       '---------------------------------------> dY/dZ (2nd output of Gradient)

By definition, the tensor “y” is a function of independent variables in “xs” and “zs”. Since we only compute the gradient of “y” w.r.t. the differentiable variables in “xs”, this Gradient only outputs dY/dW and dY/dZ. Note that “H” cannot appear in “xs” and “zs”. The reason is that “H” can be determined by tensors “W” and “X” and therefore “H” is not an independent variable.

-

All outputs are optional. If needed, for example, user can assign an empty string to the 1st output name of that Gradient to skip the generation of dY/dW. Note that the concept of optional outputs can also be found in ONNX’s RNN, GRU, and LSTM.

-

The Gradient operator can also compute derivatives against intermediate tensors. For example, the gradient of Y with respect to H can be obtained via:

W --> Conv --> H --> Gemm --> Y
       ^       |      ^
       |       |      |
       X       |      Z
       .-------'      |
       |   .----------'
       |   | (H/Z is the 1st/2nd input of Gradient as shown in "xs")
       v   v
      Gradient(xs=["H", "Z"], y="Y")
       |   |
       |   '-----------------------------------> dY/dH (1st output of Gradient)
       |
       '---------------------------------------> dY/dZ (2nd output of Gradient)

It is possible to represent high-order differentiation using Gradient operators. For example, given the following linear model:

W --> Gemm --> Y --> Loss --> O
       ^              ^
       |              |
       X              L

To compute the 2nd order derivative of O with respect to W (denoted by d^2O/dW^2), one can do:

W --> Gemm --> Y --> Loss --> O
|      ^              ^
|      |              |
|      X .------------L
|      | |            |
|      | |            v
+------+-+> Gradient(xs=["X", "W"], zs=["L"], y="O") ---> dO/dX (1st output of Gradient)
|      | |    |
|      | |    '---> dO/dW (2nd output of Gradient)
|      v v
'---> Gradient(xs=["X", "W"], zs=["L"], y="dO/dW") ---> d(dO/dW)dX (1st output of
       |                                                  Gradient)
       |
       |
       '---> d^2O/dW^2 (2nd output of Gradient)

The tensors named in attributes “xs”, “zs”, and “y” define the differentiated computation graph, and the inputs to Gradient node define the values at which the gradient is computed. We can feed different tensors to the identified graph. For example, one can compute the gradient of Y with respect to H at a specific value of H, H_1, by providing that value as an input to the Gradient node.

W --> Conv --> H --> Gemm --> Y
       ^              ^
       |              |
       X              Z

          Z_1 (2nd input of Gradient)
           |
           v
H_1 --> Gradient(xs=["H", "Z"], y="Y") ---> dY/dH when H = H_1 and Y = Y_1.
           |
           '------------------------------> dY/dZ (2nd output of Gradient)

When the inputs of Gradient are the tensors named in “xs” and “zs”, the computation can be optimized. More specifically, intermediate variables in the forward pass can be reused if the gradient is computed via reverse-mode auto-differentiation.

-

Attributes

-
    -
  • -
  • -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • Inputs (variadic)T1: The values fed into graph identified by the attributes. The i-th input is the value of the i-th tensor specified in the concatenated list of the attribute “xs” and the attribute “zs”. For example, if xs=[“A”, “B”] and zs=[“C”], the first input is used as the value of symbol “A” and the 3rd input is substituted for all the occurrences of “C”.

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • Outputs (variadic)T2: The gradient of the tensor specified by the attribute “y” with respect to each of tensors specified in the attribute “xs”. The i-th output is the gradient of “y” with respect to the i-th tensor specified in the attribute “xs”.

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Allow inputs to be any kind of tensor.

  • -
  • T2 tensor(float16), tensor(float), tensor(double): Allow outputs to be any kind of floating-point tensor.

  • -
-
- -
-
-
-
-
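The graphs above are easiest to read with a concrete scalar stand-in. The following NumPy sketch (illustrative only, not from the specification) mimics what Gradient(xs=["W", "Z"], y="Y") returns for a linear graph Y = X * W + Z, checking the two outputs by central finite differences:

    import numpy as np

    def f(w, z, x):
        # Scalar stand-in for the Conv/Gemm graph above: Y = X * W + Z.
        return x * w + z

    x, w, z = 3.0, 2.0, 0.5
    eps = 1e-6
    # dY/dW and dY/dZ evaluated at the fed values, i.e. the two Gradient outputs.
    dY_dW = (f(w + eps, z, x) - f(w - eps, z, x)) / (2 * eps)   # ~ x = 3.0
    dY_dZ = (f(w, z + eps, x) - f(w, z - eps, x)) / (2 * eps)   # ~ 1.0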

OnnxGradient_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGradient_1(*args, **kwargs)#
-

Version

-

Onnx name: Gradient

-

This version of the operator has been available since -version 1 of domain ai.onnx.preview.training.

-

Summary

-

Gradient operator computes the partial derivatives of a specific tensor w.r.t. -some other tensors. This operator is widely used in gradient-based training -algorithms. To illustrate its use, let’s consider a computation graph,

-
X -----.
-       |
-       v
-W --> Conv --> H --> Gemm --> Y
-                      ^
-                      |
-                      Z
-
-
-

, where W and Z are trainable tensors. Note that operators’ attributes are -omitted for the sake of simplicity. Let dY/dW (dY/dZ) be the gradient of -Y with respect to W (Z). The user can compute gradient by inserting Gradient -operator to form another graph shown below.

-
W --> Conv --> H --> Gemm --> Y
-|      ^              ^
-|      |              |
-|      X              Z
-|      |              |
-|      |   .----------'
-|      |   |  (W/Z/X is the 1st/2nd/3rd input of Gradient as shown in
-|      |   |   "xs" followed by "zs")
-|      v   v
-'---> Gradient(xs=["W", "Z"], zs=["X"], y="Y")
-       |   |
-       |   '-----------------------------------> dY/dW (1st output of Gradient)
-       |
-       '---------------------------------------> dY/dZ (2nd output of Gradient)
-
-
-

By definition, the tensor “y” is a function of independent variables in “xs” -and “zs”. Since we only compute the gradient of “y” w.r.t. the differentiable -variables in “xs”, this Gradient only outputs dY/dW and dY/dZ. Note that “H” -cannot appear in “xs” and “zs”. The reason is that “H” can be determined by -tensors “W” and “X” and therefore “H” is not an independent variable.

-

All outputs are optional. If needed, for example, user can assign an empty -string to the 1st output name of that Gradient to skip the generation of dY/dW. -Note that the concept of optional outputs can also be found in ONNX’s RNN, GRU, -and LSTM.

-

Gradient operator can compute derivative against intermediate tensors. For -example, the gradient of Y with respect to H can be done via

-
W --> Conv --> H --> Gemm --> Y
-       ^       |      ^
-       |       |      |
-       X       |      Z
-       .-------'      |
-       |   .----------'
-       |   | (H/Z is the 1st/2nd input of Gradient as shown in "xs")
-       v   v
-      Gradient(xs=["H", "Z"], y="Y")
-       |   |
-       |   '-----------------------------------> dY/dH (1st output of Gradient)
-       |
-       '---------------------------------------> dY/dZ (2nd output of Gradient)
-
-
-

It is possible to represent high-order differentiation using Gradient operators. -For example, given the following linear model:

-
W --> Gemm --> Y --> Loss --> O
-       ^              ^
-       |              |
-       X              L
-
-
-

To compute the 2nd order derivative of O with respect to W (denoted by -d^2O/dW^2), one can do

-
W --> Gemm --> Y --> Loss --> O
-|      ^              ^
-|      |              |
-|      X .------------L
-|      | |            |
-|      | |            v
-+------+-+> Gradient(xs=["X", "W"], zs=["L"], y="O") ---> dO/dX (1st output of Gradient)
-|      | |    |
-|      | |    '---> dO/dW (2nd output of Gradient)
-|      v v
-'---> Gradient(xs=["X", "W"], zs=["L"], y="dO/dW") ---> d(dO/dW)dX (1st output of
-       |                                                  Gradient)
-       |
-       |
-       '---> d^2O/dW^2 (2nd output of Gradient)
-
-
-

The tensors named in attributes “xs”, “zs”, and “y” define the differentiated -computation graph, and the inputs to Gradient node define the values at -which the gradient is computed. We can feed different tensors to the identified -graph. For example, one can compute the gradient of Y with respect to H at -a specific value of H, H_1, by providing that value as an input to the Gradient -node.

-
W --> Conv --> H --> Gemm --> Y
-       ^              ^
-       |              |
-       X              Z
-
-          Z_1 (2nd input of Gradient)
-           |
-           v
-H_1 --> Gradient(xs=["H", "Z"], y="Y") ---> dY/dH when H = H_1 and Y = Y_1.
-           |
-           '------------------------------> dY/dZ (2nd output of Gradient)
-
-
-

When the inputs of Gradient are the tensors named in “xs” and “zs”, the -computation can be optimized. More specifically, intermediate variables in -forward pass can be reused if the gradient is computed via reverse-mode -auto-differentiation.

-

Attributes

-
    -
  • -
  • -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • Inputs (variadic)T1: The values fed into graph identified by the attributes. The i-th input is the value of the i-th tensor specified in the concatenated list of the attribute “xs” and the attribute “zs”. For example, if xs=[“A”, “B”] and zs=[“C”], the first input is used as the value of symbol “A” and the 3rd input is substituted for all the occurrences of “C”.

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • Outputs (variadic)T2: The gradient of the tensor specified by the attribute “y” with respect to each of tensors specified in the attribute “xs”. The i-th output is the gradient of “y” with respect to the i-th tensor specified in the attribute “xs”.

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Allow outputs to be any kind of tensor.

  • -
  • T2 tensor(float16), tensor(float), tensor(double): Allow inputs to be any kind of floating-point tensor.

  • -
-
- -
-
-
-
-

OnnxGreater#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGreater(*args, **kwargs)#
-

Version

-

Onnx name: Greater

-

This version of the operator has been available since version 13.

-

Summary

-

Returns the tensor resulting from performing the greater logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrains input types to all numeric tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-
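The elementwise comparison with a boolean result matches NumPy's greater; a tiny sketch (illustrative, not from the specification):

    import numpy as np

    A = np.array([[1.0, 5.0], [3.0, 2.0]], dtype=np.float32)
    B = np.array([[2.0, 5.0], [1.0, 4.0]], dtype=np.float32)
    C = np.greater(A, B)   # boolean tensor: [[False, False], [True, False]]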

OnnxGreaterOrEqual#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGreaterOrEqual(*args, **kwargs)#
-

Version

-

Onnx name: GreaterOrEqual

-

This version of the operator has been available since version 16.

-

Summary

-

Returns the tensor resulting from performing the greater_equal logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrains input types to all numeric tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-
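A small NumPy sketch (illustrative) of the multidirectional broadcasting mentioned above: the operands have shapes (3, 1) and (4,), so the boolean result has shape (3, 4).

    import numpy as np

    A = np.arange(3, dtype=np.float32).reshape(3, 1)   # shape (3, 1)
    B = np.arange(4, dtype=np.float32)                 # shape (4,)
    C = np.greater_equal(A, B)                         # shape (3, 4), dtype bool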

OnnxGreaterOrEqual_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGreaterOrEqual_12(*args, **kwargs)#
-

Version

-

Onnx name: GreaterOrEqual

-

This version of the operator has been available since -version 12.

-

Summary

-

Returns the tensor resulted from performing the greater_equal logical operation -elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input types to all numeric tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxGreaterOrEqual_16#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGreaterOrEqual_16(*args, **kwargs)#
-

Version

-

Onnx name: GreaterOrEqual

-

This version of the operator has been available since -version 16.

-

Summary

-

Returns the tensor resulted from performing the greater_equal logical operation -elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrains input types to all numeric tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxGreater_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGreater_1(*args, **kwargs)#
-

Version

-

Onnx name: Greater

-

This version of the operator has been available since -version 1.

-

Summary

-

Returns the tensor resulted from performing the greater logical operation -elementwise on the input tensors A and B.

-

If broadcasting is enabled, the right-hand-side argument will be broadcasted -to match the shape of left-hand-side argument. See the doc of Add for a -detailed description of the broadcasting rules.

-

Attributes

-
    -
  • -
  • broadcast: Enable broadcasting. Default value is name: "broadcast" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: Left input tensor for the logical operator.

  • -
  • B (heterogeneous)T: Right input tensor for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrains input to float tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxGreater_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGreater_13(*args, **kwargs)#
-

Version

-

Onnx name: Greater

-

This version of the operator has been available since -version 13.

-

Summary

-

Returns the tensor resulted from performing the greater logical operation -elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrains input types to all numeric tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxGreater_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGreater_7(*args, **kwargs)#
-

Version

-

Onnx name: Greater

-

This version of the operator has been available since -version 7.

-

Summary

-

Returns the tensor resulted from performing the greater logical operation -elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrains input to float tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxGreater_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGreater_9(*args, **kwargs)#
-

Version

-

Onnx name: Greater

-

This version of the operator has been available since -version 9.

-

Summary

-

Returns the tensor resulted from performing the greater logical operation -elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input types to all numeric tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxGridSample#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGridSample(*args, **kwargs)#
-

Version

-

Onnx name: GridSample

-

This version of the operator has been available since version 16.

-

Summary

-

Given an input and a flow-field grid, computes the output using input values and pixel locations from grid. Currently, only spatial (4-D) inputs are supported. For input with shape (N, C, H, W) and grid with shape (N, H_out, W_out, 2), the output will have shape (N, C, H_out, W_out). For each output location output[N, C, H_out, W_out], the size-2 vector grid[N, H_out, W_out] specifies input pixel locations x and y, which are used to interpolate the output value output[N, C, H_out, W_out].

-

The GridSample operator is often used for the grid generator and sampler in [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025). See also [torch.nn.functional.grid_sample](https://pytorch.org/docs/master/generated/torch.nn.functional.grid_sample.html#torch-nn-functional-grid-sample).

-

Attributes

-
  • align_corners: If align_corners=1, the extrema (-1 and 1) are considered as referring to the center points of the input’s corner pixels. If align_corners=0, they are instead considered as referring to the corner points of the input’s corner pixels, making the sampling more resolution agnostic. Default value is name: "align_corners" i: 0 type: INT

  • mode: Three interpolation modes: bilinear (default), nearest and bicubic. Default value is name: "mode" s: "bilinear" type: STRING

  • padding_mode: Supported padding modes for outside grid values: zeros (default), border, reflection. zeros: use 0 for out-of-bound grid locations. border: use border values for out-of-bound grid locations. reflection: use values at locations reflected by the border for out-of-bound grid locations. If index 0 represents the margin pixel, the reflected value at index -1 will be the same as the value at index 1. For locations far away from the border, the value keeps being reflected until it falls in bounds: if pixel location x = -3.5 reflects by border -1 it becomes x’ = 1.5, then reflects by border 1 and becomes x’’ = 0.5. Default value is name: "padding_mode" s: "zeros" type: STRING

Inputs

-
    -
  • X (heterogeneous)T1: 4-D tensor of shape (N, C, H, W), where N is the batch size, C is the numbers of channels, H and W are the height and width of the input data.

  • -
  • grid (heterogeneous)T1: Input offset, 4-D tensor of shape (N, H_out, W_out, 2), where H_out and W_out are the height and width of grid and output, Grid specifies the sampling pixel locations normalized by the input spatial dimensions. Therefore, it should have most values in the range of [-1, 1]. If grid has values outside the range of [-1, 1], the corresponding outputs will be handled as defined by padding_mode.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: 4-D tensor of shape (N, C, H_out, W_out).

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input types to all tensor types.

  • -
  • T2 tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

  • -
-
- -
-
-
-
-
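The coordinate handling is the subtle part of this operator. The following NumPy sketch (illustrative, not the reference implementation) shows how normalized grid values in [-1, 1] map to pixel indices for both align_corners settings; for brevity it uses nearest-neighbour sampling with border clamping instead of the default bilinear interpolation and zeros padding.

    import numpy as np

    def grid_sample_nearest(X, grid, align_corners=0):
        # X: (N, C, H, W); grid: (N, H_out, W_out, 2) with (x, y) in [-1, 1].
        N, C, H, W = X.shape
        _, H_out, W_out, _ = grid.shape
        Y = np.zeros((N, C, H_out, W_out), dtype=X.dtype)
        for n in range(N):
            for i in range(H_out):
                for j in range(W_out):
                    gx, gy = grid[n, i, j]          # x along width, y along height
                    if align_corners:
                        px = (gx + 1) / 2 * (W - 1)
                        py = (gy + 1) / 2 * (H - 1)
                    else:
                        px = ((gx + 1) * W - 1) / 2
                        py = ((gy + 1) * H - 1) / 2
                    xi = int(np.clip(np.rint(px), 0, W - 1))
                    yi = int(np.clip(np.rint(py), 0, H - 1))
                    Y[n, :, i, j] = X[n, :, yi, xi]
        return Y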

OnnxGridSample_16#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxGridSample_16(*args, **kwargs)#
-

Version

-

Onnx name: GridSample

-

This version of the operator has been available since -version 16.

-

Summary

-

Given an input and a flow-field grid, computes the output using input values and pixel locations from grid. -Currently, only spatial (4-D) inputs are supported. For input with shape (N, C, H, W) and grid with shape (N, H_out, W_out, 2), -the output will have shape (N, C, H_out, W_out). -For each output location output[N, C, H_out, W_out], the size-2 vector grid[N, H_out, W_out] specifies input pixel locations x and y, -which are used to interpolate the output value output[N, C, H_out, W_out].

-

The GridSample operator is often used in doing grid generator and sampler in the [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025). -See also in [torch.nn.functional.grid_sample](https://pytorch.org/docs/master/generated/torch.nn.functional.grid_sample.html#torch-nn-functional-grid-sample).

-

Attributes

-
    -
  • align_corners: If align_corners=1, the extrema (-1 and 1) are considered as referring to the center points of the input’s corner pixels. If align_corners=0, they are instead considered as referring to the corner points of the input’s corner pixels, making the sampling more resolution agnostic. Default value is -name: "align_corners" i: 0 type: INT

  • -
  • mode: Three interpolation modes: bilinear (default), nearest and bicubic. Default value is -name: "mode" s: "bilinear" type: STRING

  • -
  • padding_mode: Support padding modes for outside grid values: zeros`(default), `border, reflection. zeros: use 0 for out-of-bound grid locations, border: use border values for out-of-bound grid locations, reflection: use values at locations reflected by the border for out-of-bound grid locations. If index 0 represents the margin pixel, the reflected value at index -1 will be the same as the value at index 1. For location far away from the border, it will keep being reflected until becoming in bound. If pixel location x = -3.5 reflects by border -1 and becomes x’ = 1.5, then reflects by border 1 and becomes x’’ = 0.5. Default value is -name: "padding_mode" s: "zeros" type: STRING

  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: 4-D tensor of shape (N, C, H, W), where N is the batch size, C is the numbers of channels, H and W are the height and width of the input data.

  • -
  • grid (heterogeneous)T1: Input offset, 4-D tensor of shape (N, H_out, W_out, 2), where H_out and W_out are the height and width of grid and output, Grid specifies the sampling pixel locations normalized by the input spatial dimensions. Therefore, it should have most values in the range of [-1, 1]. If grid has values outside the range of [-1, 1], the corresponding outputs will be handled as defined by padding_mode.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: 4-D tensor of shape (N, C, H_out, W_out).

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input types to all tensor types.

  • -
  • T2 tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxHardSigmoid#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxHardSigmoid(*args, **kwargs)#
-

Version

-

Onnx name: HardSigmoid

-

This version of the operator has been available since version 6.

-

Summary

-

HardSigmoid takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)), is applied to the tensor elementwise.

-

Attributes

-
  • alpha: Value of alpha. Default value is name: "alpha" f: 0.20000000298023224 type: FLOAT

  • beta: Value of beta. Default value is name: "beta" f: 0.5 type: FLOAT

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-
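A NumPy sketch (illustrative) of the formula above with the default alpha=0.2 and beta=0.5:

    import numpy as np

    def hard_sigmoid(x, alpha=0.2, beta=0.5):
        # y = max(0, min(1, alpha * x + beta)), elementwise.
        return np.clip(alpha * x + beta, 0.0, 1.0)

    x = np.array([-4.0, -1.0, 0.0, 1.0, 4.0], dtype=np.float32)
    y = hard_sigmoid(x)   # [0. , 0.3, 0.5, 0.7, 1. ]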

OnnxHardSigmoid_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxHardSigmoid_1(*args, **kwargs)#
-

Version

-

Onnx name: HardSigmoid

-

This version of the operator has been available since -version 1.

-

Summary

-

HardSigmoid takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)), -is applied to the tensor elementwise.

-

Attributes

-
    -
  • alpha: Value of alpha default to 0.2 Default value is -name: "alpha" f: 0.20000000298023224 type: FLOAT

  • -
  • beta: Value of beta default to 0.5 Default value is -name: "beta" f: 0.5 type: FLOAT

  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxHardSigmoid_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxHardSigmoid_6(*args, **kwargs)#
-

Version

-

Onnx name: HardSigmoid

-

This version of the operator has been available since -version 6.

-

Summary

-

HardSigmoid takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)), -is applied to the tensor elementwise.

-

Attributes

-
    -
  • alpha: Value of alpha. Default value is -name: "alpha" f: 0.20000000298023224 type: FLOAT

  • -
  • beta: Value of beta. Default value is -name: "beta" f: 0.5 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxHardSwish#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxHardSwish(*args, **kwargs)#
-

Version

-

Onnx name: HardSwish

-

This version of the operator has been available since version 14.

-

Summary

-

HardSwish takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the HardSwish function, y = x * max(0, min(1, alpha * x + beta)) = x * HardSigmoid<alpha, beta>(x), where alpha = 1/6 and beta = 0.5, is applied to the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-
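A NumPy sketch (illustrative) of the fixed-coefficient formula above, y = x * HardSigmoid(x) with alpha = 1/6 and beta = 0.5:

    import numpy as np

    def hard_swish(x):
        return x * np.clip(x / 6.0 + 0.5, 0.0, 1.0)

    x = np.array([-4.0, -1.0, 0.0, 1.0, 4.0], dtype=np.float32)
    y = hard_swish(x)   # approximately [-0., -0.333, 0., 0.667, 4.]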

OnnxHardSwish_14#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxHardSwish_14(*args, **kwargs)#
-

Version

-

Onnx name: HardSwish

-

This version of the operator has been available since -version 14.

-

Summary

-

HardSwish takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where -the HardSwish function, y = x * max(0, min(1, alpha * x + beta)) = x * HardSigmoid<alpha, beta>(x), -where alpha = 1/6 and beta = 0.5, is applied to the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxHardmax#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxHardmax(*args, **kwargs)#
-

Version

-

Onnx name: Hardmax

-

This version of the operator has been available since version 13.

-

Summary

-

The operator computes the hardmax values for the given input:

-
-

Hardmax(element in input, axis) = 1 if the element is the first maximum value along the specified axis, 0 otherwise

-
-

The “axis” attribute indicates the dimension along which Hardmax will be performed. The output tensor has the same shape and contains the Hardmax values of the corresponding input.

-

Attributes

-
  • axis: Describes the dimension Hardmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: -1 type: INT

Inputs

-
    -
  • input (heterogeneous)T: The input tensor of rank >= axis.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The output values with the same shape as the input tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-
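A NumPy sketch (illustrative) of the definition above: a one-hot of the first maximum along the chosen axis.

    import numpy as np

    def hardmax(x, axis=-1):
        # 1 at the first maximum along `axis`, 0 everywhere else.
        y = np.zeros_like(x)
        idx = np.expand_dims(np.argmax(x, axis=axis), axis)
        np.put_along_axis(y, idx, 1, axis=axis)
        return y

    x = np.array([[1.0, 3.0, 3.0, 2.0]], dtype=np.float32)
    hardmax(x)   # [[0., 1., 0., 0.]] -- ties resolve to the first maximum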

OnnxHardmax_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxHardmax_1(*args, **kwargs)#
-

Version

-

Onnx name: Hardmax

-

This version of the operator has been available since -version 1.

-

Summary

The operator computes the hardmax (1 for the first maximum value, and 0 for all others) values for each layer in the batch of the given input. The input is a 2-D tensor (Tensor<float>) of size (batch_size x input_feature_dimensions). The output tensor has the same shape and contains the hardmax values of the corresponding input.

-

Input does not need to explicitly be a 2D vector; rather, it will be coerced into one. For an arbitrary n-dimensional tensor input in [a_0, a_1, …, a_{k-1}, a_k, …, a_{n-1}] and k is the axis provided, then input will be coerced into a 2-dimensional tensor with dimensions [a_0 * … * a_{k-1}, a_k * … * a_{n-1}]. For the default case where axis=1, this means the input tensor will be coerced into a 2D tensor of dimensions [a_0, a_1 * … * a_{n-1}], where a_0 is often the batch size. In this situation, we must have a_0 = N and a_1 * … * a_{n-1} = D. Each of these dimensions must be matched correctly, or else the operator will throw errors.

-

Attributes

-
    -
  • axis: Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Default value is name: "axis" i: 1 type: INT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: The input tensor that’s coerced into a 2D matrix of size (NxD) as described above.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The output values with the same shape as input tensor (the original size without coercion).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxHardmax_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxHardmax_11(*args, **kwargs)#
-

Version

-

Onnx name: Hardmax

-

This version of the operator has been available since -version 11.

-

Summary

The operator computes the hardmax (1 for the first maximum value, and 0 for all others) values for each layer in the batch of the given input.

The input does not need to explicitly be a 2D vector; rather, it will be coerced into one. For an arbitrary n-dimensional tensor input in [a_0, a_1, …, a_{k-1}, a_k, …, a_{n-1}] and k is the axis provided, then input will be coerced into a 2-dimensional tensor with dimensions [a_0 * … * a_{k-1}, a_k * … * a_{n-1}]. For the default case where axis=1, this means the input tensor will be coerced into a 2D tensor of dimensions [a_0, a_1 * … * a_{n-1}], where a_0 is often the batch size. In this situation, we must have a_0 = N and a_1 * … * a_{n-1} = D. Each of these dimensions must be matched correctly, or else the operator will throw errors. The output tensor has the same shape and contains the hardmax values of the corresponding input.

-

Attributes

-
    -
  • axis: Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: 1 type: INT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: The input tensor that’s coerced into a 2D matrix of size (NxD) as described above.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The output values with the same shape as input tensor (the original size without coercion).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxHardmax_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxHardmax_13(*args, **kwargs)#
-

Version

-

Onnx name: Hardmax

-

This version of the operator has been available since -version 13.

-

Summary

-

The operator computes the hardmax values for the given input:

-
-

Hardmax(element in input, axis) = 1 if the element is the first maximum value along the specified axis, 0 otherwise

-
-

The “axis” attribute indicates the dimension along which Hardmax -will be performed. The output tensor has the same shape -and contains the Hardmax values of the corresponding input.

-

Attributes

-
    -
  • axis:

  • -
-

Describes the dimension Hardmax will be performed on. -Negative value means counting dimensions -from the back. Accepted range is [-r, r-1] where r = rank(input).

-
-
-
Default value is

name: "axis" i: -1 type: INT

-
-
-
-

Inputs

-
    -
  • input (heterogeneous)T: The input tensor of rank >= axis.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The output values with the same shape as the input tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxIdentity#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxIdentity(*args, **kwargs)#
-

Version

-

Onnx name: Identity

-

This version of the operator has been available since version 16.

-

Summary

-

Identity operator

-

Inputs

-
    -
  • input (heterogeneous)V: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)V: Tensor to copy input into.

  • -
-

Type Constraints

-
    -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)), optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrain input and output types to all tensor, sequence, and optional types.

  • -
-
- -
-
-
-
-
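Identity simply copies its input, but it is commonly used to give a result a fixed graph output name. A minimal sketch (not from the documentation, assuming the usual skl2onnx algebra pattern; names are illustrative):

    import numpy as np
    from skl2onnx.algebra.onnx_ops import OnnxAdd, OnnxIdentity

    x = np.array([[1.0, 2.0]], dtype=np.float32)
    inter = OnnxAdd('X', np.array([10.0], dtype=np.float32), op_version=14)
    # Copy the intermediate result into a graph output named 'Y'.
    final = OnnxIdentity(inter, op_version=16, output_names=['Y'])
    onx = final.to_onnx({'X': x})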

OnnxIdentity_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxIdentity_1(*args, **kwargs)#
-

Version

-

Onnx name: Identity

-

This version of the operator has been available since -version 1.

-

Summary

-

Identity operator

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor to copy input into.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxIdentity_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxIdentity_13(*args, **kwargs)#
-

Version

-

Onnx name: Identity

-

This version of the operator has been available since -version 13.

-

Summary

-

Identity operator

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor to copy input into.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxIdentity_14#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxIdentity_14(*args, **kwargs)#
-

Version

-

Onnx name: Identity

-

This version of the operator has been available since -version 14.

-

Summary

-

Identity operator

-

Inputs

-
    -
  • input (heterogeneous)V: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)V: Tensor to copy input into.

  • -
-

Type Constraints

-
    -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input and output types to all tensor and sequence types.

  • -
-
- -
-
-
-
-

OnnxIdentity_16#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxIdentity_16(*args, **kwargs)#
-

Version

-

Onnx name: Identity

-

This version of the operator has been available since -version 16.

-

Summary

-

Identity operator

-

Inputs

-
    -
  • input (heterogeneous)V: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)V: Tensor to copy input into.

  • -
-

Type Constraints

-
    -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)), optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrain input and output types to all tensor, sequence, and optional types.

  • -
-
- -
-
-
-
-

OnnxIf#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxIf(*args, **kwargs)#
-

Version

-

Onnx name: If

-

This version of the operator has been available since version 16.

-

Summary

-

If conditional

-

Attributes

-
    -
  • -
  • -
-

Inputs

-
    -
  • cond (heterogeneous)B: Condition for the if

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic)V: Values that are live-out to the enclosing scope. The return values in the then_branch and else_branch must be of the same data type. The then_branch and else_branch may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes. For example, if in a model file the first output of then_branch is typed float tensor with shape [2] and the first output of else_branch is another float tensor with shape [3], If’s first output should have (a) no shape set, or (b) a shape of rank 1 with neither dim_value nor dim_param set, or (c) a shape of rank 1 with a unique dim_param. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.

  • -
-

Type Constraints

-
    -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(bfloat16)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)), optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(bfloat16))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(bfloat16)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types

  • -
  • B tensor(bool): Only bool

  • -
-
- -
-
-
-
-
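Semantically, If evaluates a boolean condition and returns the outputs of one of two subgraphs held in its then_branch/else_branch attributes. A plain-Python sketch (illustrative only) of that behaviour, including the point made above that the two branches may return different static shapes:

    import numpy as np

    def if_op(cond, then_branch, else_branch):
        # cond: a scalar boolean tensor; each branch is a callable returning
        # a list of outputs (standing in for the subgraph attributes).
        return then_branch() if bool(cond) else else_branch()

    cond = np.array(True)
    outputs = if_op(
        cond,
        then_branch=lambda: [np.array([1.0, 2.0], dtype=np.float32)],        # shape [2]
        else_branch=lambda: [np.array([0.0, 0.0, 0.0], dtype=np.float32)],   # shape [3]
    )
    # Both branches return the same element type; the output shape is only
    # known once cond is evaluated, which is why the declared If output shape
    # must be compatible with both branch shapes.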

OnnxIf_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxIf_1(*args, **kwargs)#
-

Version

-

Onnx name: If

-

This version of the operator has been available since -version 1.

-

Summary

-

If conditional

-

Attributes

-
    -
  • -
  • -
-

Inputs

-
    -
  • cond (heterogeneous)B: Condition for the if

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic)V: Values that are live-out to the enclosing scope. The return values in the then_branch and else_branch must be of the same shape and same data type.

  • -
-

Type Constraints

-
    -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

  • -
  • B tensor(bool): Only bool

  • -
-
- -
-
-
-
-

OnnxIf_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxIf_11(*args, **kwargs)#
-

Version

-

Onnx name: If

-

This version of the operator has been available since -version 11.

-

Summary

-

If conditional

-

Attributes

-
    -
  • -
  • -
-

Inputs

-
    -
  • cond (heterogeneous)B: Condition for the if

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic)V: Values that are live-out to the enclosing scope. The return values in the then_branch and else_branch must be of the same data type. The then_branch and else_branch may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the the first output of then_branch is typed float tensor with shape [2] and the first output of else_branch is another float tensor with shape [3], If’s first output should have (a) no shape set, or (b) a shape of rank 1 with neither dim_value nor dim_param set, or (c) a shape of rank 1 with a unique dim_param. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.

  • -
-

Type Constraints

-
    -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

  • -
  • B tensor(bool): Only bool

  • -
-
- -
-
-
-
-

OnnxIf_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxIf_13(*args, **kwargs)#
-

Version

-

Onnx name: If

-

This version of the operator has been available since -version 13.

-

Summary

-

If conditional

-

Attributes

-
    -
  • -
  • -
-

Inputs

-
    -
  • cond (heterogeneous)B: Condition for the if

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic)V: Values that are live-out to the enclosing scope. The return values in the then_branch and else_branch must be of the same data type. The then_branch and else_branch may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the the first output of then_branch is typed float tensor with shape [2] and the first output of else_branch is another float tensor with shape [3], If’s first output should have (a) no shape set, or (b) a shape of rank 1 with neither dim_value nor dim_param set, or (c) a shape of rank 1 with a unique dim_param. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.

  • -
-

Type Constraints

-
    -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): All Tensor and Sequence types

  • -
  • B tensor(bool): Only bool


OnnxIf_16#

class skl2onnx.algebra.onnx_ops.OnnxIf_16(*args, **kwargs)#

Version

Onnx name: If

This version of the operator has been available since version 16.

-

Summary

-

If conditional

-

Attributes

-
    -
  • -
  • -
-

Inputs

-
    -
  • cond (heterogeneous)B: Condition for the if

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic)V: Values that are live-out to the enclosing scope. The return values in the then_branch and else_branch must be of the same data type. The then_branch and else_branch may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes. For example, if in a model file the first output of then_branch is typed float tensor with shape [2] and the first output of else_branch is another float tensor with shape [3], If’s first output should have (a) no shape set, or (b) a shape of rank 1 with neither dim_value nor dim_param set, or (c) a shape of rank 1 with a unique dim_param. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.

  • -
-

Type Constraints

-
    -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(bfloat16)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)), optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(bfloat16))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(bfloat16)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types

  • -
  • B tensor(bool): Only bool

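The snippet below is a minimal sketch of how an If node can be assembled with this class. It assumes the then_branch and else_branch attributes accept onnx GraphProto objects built with onnx.helper; the names cond, then_out, else_out and out are placeholders chosen for the example, not names defined by skl2onnx.

    import numpy as np
    from onnx import TensorProto
    from onnx.helper import (
        make_graph, make_node, make_tensor, make_tensor_value_info)
    from skl2onnx.algebra.onnx_ops import OnnxIf

    # Each branch is a graph with no inputs and a single constant output.
    then_graph = make_graph(
        [make_node('Constant', [], ['then_out'],
                   value=make_tensor('v1', TensorProto.FLOAT, [1], [1.0]))],
        'then_branch', [],
        [make_tensor_value_info('then_out', TensorProto.FLOAT, [1])])
    else_graph = make_graph(
        [make_node('Constant', [], ['else_out'],
                   value=make_tensor('v0', TensorProto.FLOAT, [1], [0.0]))],
        'else_branch', [],
        [make_tensor_value_info('else_out', TensorProto.FLOAT, [1])])

    # The condition is the only real input of the node.
    cond = np.array([True])
    node = OnnxIf('cond', then_branch=then_graph, else_branch=else_graph,
                  op_version=16, output_names=['out'])
    onx = node.to_onnx({'cond': cond})

With a runtime that supports subgraphs, the resulting model should return [1.0] when cond is true and [0.0] otherwise.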

OnnxImputer#

class skl2onnx.algebra.onnx_ops.OnnxImputer(*args, **kwargs)#

Version

Onnx name: Imputer

This version of the operator has been available since version 1 of domain ai.onnx.ml.

-

Summary

-

Replaces inputs that equal one value with another, leaving all other elements alone.

-

This operator is typically used to replace missing values in situations where they have a canonical representation, such as -1, 0, NaN, or some extreme value.

-

One and only one of imputed_value_floats or imputed_value_int64s should be defined – floats if the input tensor holds floats, integers if the input tensor holds integers. The imputed values must all fit within the width of the tensor element type. One and only one of the replaced_value_float or replaced_value_int64 should be defined, which one depends on whether floats or integers are being processed.

-

The imputed_value attribute length can be 1 element, or it can have one element per input feature. In other words, if the input tensor has the shape [*,F], then the length of the attribute array may be 1 or F. If it is 1, then it is broadcast along the last dimension and applied to each feature.

-

Attributes

-
    -
  • -
  • -
  • replaced_value_float: A value that needs replacing. Default value is -name: "replaced_value_float" f: 0.0 type: FLOAT

  • -
  • replaced_value_int64: A value that needs replacing. Default value is -name: "replaced_value_int64" i: 0 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Data to be processed.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Imputed output data

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type, either [N,C] or [C]. The output type will be of the same tensor type and shape.

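As a hedged illustration of the attributes above, the sketch below replaces NaN values with a constant using this class; the input name X, the output name Y and the chosen imputed value are arbitrary choices for the example, and exact attribute handling may vary between versions.

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxImputer

    X = np.array([[1.0, np.nan], [np.nan, 3.0]], dtype=np.float32)
    # One imputed value, broadcast to every feature, replacing NaN.
    node = OnnxImputer('X', imputed_value_floats=[0.5],
                       replaced_value_float=np.nan, output_names=['Y'])
    onx = node.to_onnx({'X': X})
    sess = InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
    print(sess.run(None, {'X': X})[0])
    # expected: [[1.  0.5], [0.5 3. ]]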

OnnxImputer_1#

class skl2onnx.algebra.onnx_ops.OnnxImputer_1(*args, **kwargs)#

Version

Onnx name: Imputer

This version of the operator has been available since version 1 of domain ai.onnx.ml.

-

Summary

-

Replaces inputs that equal one value with another, leaving all other elements alone.

-

This operator is typically used to replace missing values in situations where they have a canonical -representation, such as -1, 0, NaN, or some extreme value.

-

One and only one of imputed_value_floats or imputed_value_int64s should be defined – floats if the input tensor -holds floats, integers if the input tensor holds integers. The imputed values must all fit within the -width of the tensor element type. One and only one of the replaced_value_float or replaced_value_int64 should be defined, -which one depends on whether floats or integers are being processed.

-

The imputed_value attribute length can be 1 element, or it can have one element per input feature. -In other words, if the input tensor has the shape [*,F], then the length of the attribute array may be 1 or F. If it is 1, then it is broadcast along the last dimension and applied to each feature.

-

Attributes

-
    -
  • -
  • -
  • replaced_value_float: A value that needs replacing. Default value is -name: "replaced_value_float" f: 0.0 type: FLOAT

  • -
  • replaced_value_int64: A value that needs replacing. Default value is -name: "replaced_value_int64" i: 0 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Data to be processed.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Imputed output data

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type, either [N,C] or [C]. The output type will be of the same tensor type and shape.

  • -
-
- -
-
-
-
-

OnnxInstanceNormalization#

class skl2onnx.algebra.onnx_ops.OnnxInstanceNormalization(*args, **kwargs)#

Version

Onnx name: InstanceNormalization

This version of the operator has been available since version 6.

-

Summary

-

Carries out instance normalization as described in the paper https://arxiv.org/abs/1607.08022.

-

y = scale * (x - mean) / sqrt(variance + epsilon) + B, where mean and variance are computed per instance per channel.

-

Attributes

-
    -
  • epsilon: The epsilon value to use to avoid division by zero. Default value is -name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
  • scale (heterogeneous)T: The input 1-dimensional scale tensor of size C.

  • -
  • B (heterogeneous)T: The input 1-dimensional bias tensor of size C.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The output tensor of the same shape as input.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

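A minimal sketch of the formula above, assuming the per-channel scale and bias constants can be passed directly as numpy arrays (they then become initializers of the resulting graph); the tensor names are illustrative only.

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxInstanceNormalization

    X = np.random.randn(1, 2, 3, 3).astype(np.float32)   # N x C x H x W
    scale = np.ones(2, dtype=np.float32)                  # one value per channel
    bias = np.zeros(2, dtype=np.float32)

    node = OnnxInstanceNormalization('X', scale, bias, epsilon=1e-5,
                                     op_version=6, output_names=['Y'])
    onx = node.to_onnx({'X': X})
    sess = InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
    Y = sess.run(None, {'X': X})[0]
    # Each (sample, channel) slice of Y has mean ~0 and variance ~1.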

OnnxInstanceNormalization_1#

class skl2onnx.algebra.onnx_ops.OnnxInstanceNormalization_1(*args, **kwargs)#

Version

Onnx name: InstanceNormalization

This version of the operator has been available since version 1.

-

Summary

-

Carries out instance normalization as described in the paper -https://arxiv.org/abs/1607.08022.

-

y = scale * (x - mean) / sqrt(variance + epsilon) + B, -where mean and variance are computed per instance per channel.

-

Attributes

-
    -
  • -
  • epsilon: The epsilon value to use to avoid division by zero, default is 1e-5f. Default value is -name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: The input 4-dimensional tensor of shape NCHW.

  • -
  • scale (heterogeneous)T: The input 1-dimensional scale tensor of size C.

  • -
  • B (heterogeneous)T: The input 1-dimensional bias tensor of size C.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The output 4-dimensional tensor of the same shape as input.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxInstanceNormalization_6#

class skl2onnx.algebra.onnx_ops.OnnxInstanceNormalization_6(*args, **kwargs)#

Version

Onnx name: InstanceNormalization

This version of the operator has been available since version 6.

-

Summary

-

Carries out instance normalization as described in the paper -https://arxiv.org/abs/1607.08022.

-

y = scale * (x - mean) / sqrt(variance + epsilon) + B, -where mean and variance are computed per instance per channel.

-

Attributes

-
    -
  • epsilon: The epsilon value to use to avoid division by zero. Default value is -name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
  • scale (heterogeneous)T: The input 1-dimensional scale tensor of size C.

  • -
  • B (heterogeneous)T: The input 1-dimensional bias tensor of size C.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The output tensor of the same shape as input.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxIsInf#

class skl2onnx.algebra.onnx_ops.OnnxIsInf(*args, **kwargs)#

Version

Onnx name: IsInf

This version of the operator has been available since version 10.

-

Summary

-

Map infinity to true and other values to false.

-

Attributes

-
    -
  • detect_negative: (Optional) Whether map negative infinity to true. Default to 1 so that negative infinity induces true. Set this attribute to 0 if negative infinity should be mapped to false. Default value is -name: "detect_negative" i: 1 type: INT

  • -
  • detect_positive: (Optional) Whether map positive infinity to true. Default to 1 so that positive infinity induces true. Set this attribute to 0 if positive infinity should be mapped to false. Default value is -name: "detect_positive" i: 1 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: input

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: output

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(double): Constrain input types to float tensors.

  • -
  • T2 tensor(bool): Constrain output types to boolean tensors.

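The sketch below illustrates the two attributes: with detect_negative=0 only positive infinity is reported. The input and output names are placeholders chosen for the example.

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxIsInf

    X = np.array([1.0, np.inf, -np.inf, np.nan], dtype=np.float32)
    node = OnnxIsInf('X', detect_negative=0, op_version=10, output_names=['Y'])
    onx = node.to_onnx({'X': X})
    sess = InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
    print(sess.run(None, {'X': X})[0])   # [False  True False False]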

OnnxIsInf_10#

class skl2onnx.algebra.onnx_ops.OnnxIsInf_10(*args, **kwargs)#

Version

Onnx name: IsInf

This version of the operator has been available since version 10.

-

Summary

-

Map infinity to true and other values to false.

-

Attributes

-
    -
  • detect_negative: (Optional) Whether map negative infinity to true. Default to 1 so that negative infinity induces true. Set this attribute to 0 if negative infinity should be mapped to false. Default value is -name: "detect_negative" i: 1 type: INT

  • -
  • detect_positive: (Optional) Whether map positive infinity to true. Default to 1 so that positive infinity induces true. Set this attribute to 0 if positive infinity should be mapped to false. Default value is -name: "detect_positive" i: 1 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: input

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: output

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(double): Constrain input types to float tensors.

  • -
  • T2 tensor(bool): Constrain output types to boolean tensors.

  • -
-
- -
-
-
-
-

OnnxIsNaN#

class skl2onnx.algebra.onnx_ops.OnnxIsNaN(*args, **kwargs)#

Version

Onnx name: IsNaN

This version of the operator has been available since version 13.

-

Summary

-

Returns which elements of the input are NaN.

-

Inputs

-
    -
  • X (heterogeneous)T1: input

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: output

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to float tensors.

  • -
  • T2 tensor(bool): Constrain output types to boolean tensors.

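A short sketch, with illustrative tensor names, showing how the boolean mask is produced:

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxIsNaN

    X = np.array([0.0, np.nan, np.inf], dtype=np.float32)
    node = OnnxIsNaN('X', op_version=13, output_names=['Y'])
    onx = node.to_onnx({'X': X})
    sess = InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
    print(sess.run(None, {'X': X})[0])   # [False  True False]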

OnnxIsNaN_13#

class skl2onnx.algebra.onnx_ops.OnnxIsNaN_13(*args, **kwargs)#

Version

Onnx name: IsNaN

This version of the operator has been available since version 13.

-

Summary

-

Returns which elements of the input are NaN.

-

Inputs

-
    -
  • X (heterogeneous)T1: input

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: output

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to float tensors.

  • -
  • T2 tensor(bool): Constrain output types to boolean tensors.

  • -
-
- -
-
-
-
-

OnnxIsNaN_9#

class skl2onnx.algebra.onnx_ops.OnnxIsNaN_9(*args, **kwargs)#

Version

Onnx name: IsNaN

This version of the operator has been available since version 9.

-

Summary

-

Returns which elements of the input are NaN.

-

Inputs

-
    -
  • X (heterogeneous)T1: input

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: output

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • -
  • T2 tensor(bool): Constrain output types to boolean tensors.

  • -
-
- -
-
-
-
-

OnnxLRN#

class skl2onnx.algebra.onnx_ops.OnnxLRN(*args, **kwargs)#

Version

Onnx name: LRN

This version of the operator has been available since version 13.

-

Summary

-

Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf). It normalizes over local input regions. The local region is defined across the channels. For an element X[n, c, d1, …, dk] in a tensor of shape (N x C x D1 x D2, …, Dk), its region is {X[n, i, d1, …, dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}.

-

square_sum[n, c, d1, …, dk] = sum(X[n, i, d1, …, dk] ^ 2), where max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)).

-

Y[n, c, d1, …, dk] = X[n, c, d1, …, dk] / (bias + alpha / size * square_sum[n, c, d1, …, dk] ) ^ beta

-

Attributes

-
    -
  • alpha: Scaling parameter. Default value is -name: "alpha" f: 9.999999747378752e-05 type: FLOAT

  • -
  • beta: The exponent. Default value is -name: "beta" f: 0.75 type: FLOAT

  • -
  • bias: Default value is -name: "bias" f: 1.0 type: FLOAT

  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor, which has the shape and type as input tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

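A minimal sketch of the normalization above. The size attribute (the number of channels to sum over) is required; the remaining attributes keep the defaults listed above, and the tensor names are illustrative.

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxLRN

    X = np.random.randn(1, 4, 5, 5).astype(np.float32)   # N x C x H x W
    node = OnnxLRN('X', size=3, alpha=1e-4, beta=0.75, bias=1.0,
                   op_version=13, output_names=['Y'])
    onx = node.to_onnx({'X': X})
    sess = InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
    Y = sess.run(None, {'X': X})[0]   # same shape and type as X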

OnnxLRN_1#

class skl2onnx.algebra.onnx_ops.OnnxLRN_1(*args, **kwargs)#

Version

Onnx name: LRN

This version of the operator has been available since version 1.

-

Summary

-

Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf). -It normalizes over local input regions. -The local region is defined across the channels. For an element X[n, c, d1, …, dk] in a tensor -of shape (N x C x D1 x D2, …, Dk), its region is -{X[n, i, d1, …, dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}.

-

square_sum[n, c, d1, …, dk] = sum(X[n, i, d1, …, dk] ^ 2), -where max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)).

-

Y[n, c, d1, …, dk] = X[n, c, d1, …, dk] / (bias + alpha / size * square_sum[n, c, d1, …, dk] ) ^ beta

-

Attributes

-
    -
  • alpha: Scaling parameter. Default value is -name: "alpha" f: 9.999999747378752e-05 type: FLOAT

  • -
  • beta: The exponent. Default value is -name: "beta" f: 0.75 type: FLOAT

  • -
  • bias: Default value is -name: "bias" f: 1.0 type: FLOAT

  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor, which has the shape and type as input tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxLRN_13#

class skl2onnx.algebra.onnx_ops.OnnxLRN_13(*args, **kwargs)#

Version

Onnx name: LRN

This version of the operator has been available since version 13.

-

Summary

-

Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf). -It normalizes over local input regions. -The local region is defined across the channels. For an element X[n, c, d1, …, dk] in a tensor -of shape (N x C x D1 x D2, …, Dk), its region is -{X[n, i, d1, …, dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}.

-

square_sum[n, c, d1, …, dk] = sum(X[n, i, d1, …, dk] ^ 2), -where max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)).

-

Y[n, c, d1, …, dk] = X[n, c, d1, …, dk] / (bias + alpha / size * square_sum[n, c, d1, …, dk] ) ^ beta

-

Attributes

-
    -
  • alpha: Scaling parameter. Default value is -name: "alpha" f: 9.999999747378752e-05 type: FLOAT

  • -
  • beta: The exponent. Default value is -name: "beta" f: 0.75 type: FLOAT

  • -
  • bias: Default value is -name: "bias" f: 1.0 type: FLOAT

  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor, which has the shape and type as input tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxLSTM#

class skl2onnx.algebra.onnx_ops.OnnxLSTM(*args, **kwargs)#

Version

Onnx name: LSTM

This version of the operator has been available since version 14.

-

Summary

-

Computes a one-layer LSTM. This operator is usually supported via some custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

i - input gate

-

o - output gate

-

f - forget gate

-

c - cell gate

-

t - time step (t-1 means previous time step)

-

W[iofc] - W parameter weight matrix for input, output, forget, and cell gates

-

R[iofc] - R recurrence weight matrix for input, output, forget, and cell gates

-

Wb[iofc] - W bias vectors for input, output, forget, and cell gates

-

Rb[iofc] - R bias vectors for input, output, forget, and cell gates

-

P[iof] - P peephole weight vector for input, output, and forget gates

-

WB[iofc] - W parameter weight matrix for backward input, output, forget, and cell gates

-

RB[iofc] - R recurrence weight matrix for backward input, output, forget, and cell gates

-

WBb[iofc] - W bias vectors for backward input, output, forget, and cell gates

-

RBb[iofc] - R bias vectors for backward input, output, forget, and cell gates

-

PB[iof] - P peephole weight vector for backward input, output, and forget gates

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Sigmoid, g=Tanh, h=Tanh):

-
-
    -
  • it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)

  • -
  • ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)

  • -
  • ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)

  • -
  • Ct = ft (.) Ct-1 + it (.) ct

  • -
  • ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)

  • -
  • Ht = ot (.) h(Ct)

  • -
-
-

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-

Attributes

-
    -
  • -
  • -
  • -
  • -
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is -name: "direction" s: "forward" type: STRING

  • -
  • -
  • input_forget: Couple the input and forget gates if 1. Default value is -name: "input_forget" i: 0 type: INT

  • -
  • layout: The shape format of inputs X, initial_h, initial_c and outputs Y, Y_h, Y_c. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape = [batch_size, num_directions, hidden_size]. Default value is -name: "layout" i: 0 type: INT

  • -
-

Inputs

-

Between 3 and 8 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[iofc] and WB[iofc] (if bidirectional) along dimension 0. The tensor has shape [num_directions, 4*hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[iofc] and RB[iofc] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 4*hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wb[iofc], Rb[iofc]], and [WBb[iofc], RBb[iofc]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 8*hidden_size]. Optional: If not specified - assumed to be 0.

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
  • initial_c (optional, heterogeneous)T: Optional initial value of the cell. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
  • P (optional, heterogeneous)T: The weight tensor for peepholes. Concatenation of P[iof] and PB[iof] (if bidirectional) along dimension 0. It has shape [num_directions, 3*hidden_size]. Optional: If not specified - assumed to be 0.

  • -
-

Outputs

-

Between 0 and 3 outputs.

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • -
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
  • Y_c (optional, heterogeneous)T: The last output value of the cell. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

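Tying the shapes above together, here is a hedged sketch of a forward LSTM with random weights. It assumes hidden_size is passed as an attribute, that the weight arrays W and R become initializers when given as numpy arrays, and that the three output names can be supplied through output_names; all names are placeholders.

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxLSTM

    seq_length, batch_size, input_size, hidden_size = 4, 1, 3, 5
    X = np.random.randn(seq_length, batch_size, input_size).astype(np.float32)
    W = np.random.randn(1, 4 * hidden_size, input_size).astype(np.float32)
    R = np.random.randn(1, 4 * hidden_size, hidden_size).astype(np.float32)

    node = OnnxLSTM('X', W, R, hidden_size=hidden_size, op_version=14,
                    output_names=['Y', 'Y_h', 'Y_c'])
    onx = node.to_onnx({'X': X})
    sess = InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
    Y, Y_h, Y_c = sess.run(None, {'X': X})
    # Y:   [seq_length, num_directions, batch_size, hidden_size]
    # Y_h: [num_directions, batch_size, hidden_size]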

OnnxLSTM_1#

class skl2onnx.algebra.onnx_ops.OnnxLSTM_1(*args, **kwargs)#

Version

Onnx name: LSTM

This version of the operator has been available since version 1.

-

Summary

-

Computes an one-layer LSTM. This operator is usually supported via some -custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

i - input gate

-

o - output gate

-

f - forget gate

-

c - cell gate

-

t - time step (t-1 means previous time step)

-

W[iofc] - W parameter weight matrix for input, output, forget, and cell gates

-

R[iofc] - R recurrence weight matrix for input, output, forget, and cell gates

-

Wb[iofc] - W bias vectors for input, output, forget, and cell gates

-

Rb[iofc] - R bias vectors for input, output, forget, and cell gates

-

P[iof] - P peephole weight vector for input, output, and forget gates

-

WB[iofc] - W parameter weight matrix for backward input, output, forget, and cell gates

-

RB[iofc] - R recurrence weight matrix for backward input, output, forget, and cell gates

-

WBb[iofc] - W bias vectors for backward input, output, forget, and cell gates

-

RBb[iofc] - R bias vectors for backward input, output, forget, and cell gates

-

PB[iof] - P peephole weight vector for backward input, output, and forget gates

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Sigmoid, g=Tanh, h=Tanh):

-
-
    -
  • it = f(Xt*(Wi^T) + Ht-1*Ri + Pi (.) Ct-1 + Wbi + Rbi)

  • -
  • ft = f(Xt*(Wf^T) + Ht-1*Rf + Pf (.) Ct-1 + Wbf + Rbf)

  • -
  • ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc + Rbc)

  • -
  • Ct = ft (.) Ct-1 + it (.) ct

  • -
  • ot = f(Xt*(Wo^T) + Ht-1*Ro + Po (.) Ct + Wbo + Rbo)

  • -
  • Ht = ot (.) h(Ct)

  • -
-
-

Attributes

-
    -
  • -
  • -
  • -
  • -
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is -name: "direction" s: "forward" type: STRING

  • -
  • -
  • input_forget: Couple the input and forget gates if 1, default 0. Default value is -name: "input_forget" i: 0 type: INT

  • -
  • output_sequence: The sequence output for the hidden is optional if 0. Default 0. Default value is -name: "output_sequence" i: 0 type: INT

  • -
-

Inputs

-

Between 3 and 8 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[iofc] and WB[iofc] (if bidirectional) along dimension 0. The tensor has shape [num_directions, 4*hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[iofc] and RB[iofc] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 4*hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wb[iofc], Rb[iofc]], and [WBb[iofc], RBb[iofc]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 8*hidden_size]. Optional: If not specified - assumed to be 0.

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
  • initial_c (optional, heterogeneous)T: Optional initial value of the cell. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
  • P (optional, heterogeneous)T: The weight tensor for peepholes. Concatenation of P[iof] and PB[iof] (if bidirectional) along dimension 0. It has shape [num_directions, 3*hidde_size]. Optional: If not specified - assumed to be 0.

  • -
-

Outputs

-

Between 0 and 3 outputs.

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size]. It is optional if output_sequence is 0.

  • -
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
  • Y_c (optional, heterogeneous)T: The last output value of the cell. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • -
-
- -
-
-
-
-

OnnxLSTM_14#

class skl2onnx.algebra.onnx_ops.OnnxLSTM_14(*args, **kwargs)#

Version

Onnx name: LSTM

This version of the operator has been available since version 14.

-

Summary

-

Computes an one-layer LSTM. This operator is usually supported via some -custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

i - input gate

-

o - output gate

-

f - forget gate

-

c - cell gate

-

t - time step (t-1 means previous time step)

-

W[iofc] - W parameter weight matrix for input, output, forget, and cell gates

-

R[iofc] - R recurrence weight matrix for input, output, forget, and cell gates

-

Wb[iofc] - W bias vectors for input, output, forget, and cell gates

-

Rb[iofc] - R bias vectors for input, output, forget, and cell gates

-

P[iof] - P peephole weight vector for input, output, and forget gates

-

WB[iofc] - W parameter weight matrix for backward input, output, forget, and cell gates

-

RB[iofc] - R recurrence weight matrix for backward input, output, forget, and cell gates

-

WBb[iofc] - W bias vectors for backward input, output, forget, and cell gates

-

RBb[iofc] - R bias vectors for backward input, output, forget, and cell gates

-

PB[iof] - P peephole weight vector for backward input, output, and forget gates

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Sigmoid, g=Tanh, h=Tanh):

-
-
    -
  • it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)

  • -
  • ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)

  • -
  • ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)

  • -
  • Ct = ft (.) Ct-1 + it (.) ct

  • -
  • ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)

  • -
  • Ht = ot (.) h(Ct)

  • -
-
-

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-

Attributes

-
    -
  • -
  • -
  • -
  • -
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is -name: "direction" s: "forward" type: STRING

  • -
  • -
  • input_forget: Couple the input and forget gates if 1. Default value is -name: "input_forget" i: 0 type: INT

  • -
  • layout: The shape format of inputs X, initial_h, initial_c and outputs Y, Y_h, Y_c. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape = [batch_size, num_directions, hidden_size]. Default value is -name: "layout" i: 0 type: INT

  • -
-

Inputs

-

Between 3 and 8 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[iofc] and WB[iofc] (if bidirectional) along dimension 0. The tensor has shape [num_directions, 4*hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[iofc] and RB[iofc] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 4*hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wb[iofc], Rb[iofc]], and [WBb[iofc], RBb[iofc]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 8*hidden_size]. Optional: If not specified - assumed to be 0.

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
  • initial_c (optional, heterogeneous)T: Optional initial value of the cell. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
  • P (optional, heterogeneous)T: The weight tensor for peepholes. Concatenation of P[iof] and PB[iof] (if bidirectional) along dimension 0. It has shape [num_directions, 3*hidde_size]. Optional: If not specified - assumed to be 0.

  • -
-

Outputs

-

Between 0 and 3 outputs.

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • -
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
  • Y_c (optional, heterogeneous)T: The last output value of the cell. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • -
-
- -
-
-
-
-

OnnxLSTM_7#

class skl2onnx.algebra.onnx_ops.OnnxLSTM_7(*args, **kwargs)#

Version

Onnx name: LSTM

This version of the operator has been available since version 7.

-

Summary

-

Computes an one-layer LSTM. This operator is usually supported via some -custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

i - input gate

-

o - output gate

-

f - forget gate

-

c - cell gate

-

t - time step (t-1 means previous time step)

-

W[iofc] - W parameter weight matrix for input, output, forget, and cell gates

-

R[iofc] - R recurrence weight matrix for input, output, forget, and cell gates

-

Wb[iofc] - W bias vectors for input, output, forget, and cell gates

-

Rb[iofc] - R bias vectors for input, output, forget, and cell gates

-

P[iof] - P peephole weight vector for input, output, and forget gates

-

WB[iofc] - W parameter weight matrix for backward input, output, forget, and cell gates

-

RB[iofc] - R recurrence weight matrix for backward input, output, forget, and cell gates

-

WBb[iofc] - W bias vectors for backward input, output, forget, and cell gates

-

RBb[iofc] - R bias vectors for backward input, output, forget, and cell gates

-

PB[iof] - P peephole weight vector for backward input, output, and forget gates

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Sigmoid, g=Tanh, h=Tanh):

-
-
    -
  • it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)

  • -
  • ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)

  • -
  • ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)

  • -
  • Ct = ft (.) Ct-1 + it (.) ct

  • -
  • ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)

  • -
  • Ht = ot (.) h(Ct)

  • -
-
-

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-

Attributes

-
    -
  • -
  • -
  • -
  • -
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is -name: "direction" s: "forward" type: STRING

  • -
  • -
  • input_forget: Couple the input and forget gates if 1. Default value is -name: "input_forget" i: 0 type: INT

  • -
-

Inputs

-

Between 3 and 8 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[iofc] and WB[iofc] (if bidirectional) along dimension 0. The tensor has shape [num_directions, 4*hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[iofc] and RB[iofc] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 4*hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wb[iofc], Rb[iofc]], and [WBb[iofc], RBb[iofc]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 8*hidden_size]. Optional: If not specified - assumed to be 0.

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
  • initial_c (optional, heterogeneous)T: Optional initial value of the cell. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
  • P (optional, heterogeneous)T: The weight tensor for peepholes. Concatenation of P[iof] and PB[iof] (if bidirectional) along dimension 0. It has shape [num_directions, 3*hidde_size]. Optional: If not specified - assumed to be 0.

  • -
-

Outputs

-

Between 0 and 3 outputs.

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • -
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
  • Y_c (optional, heterogeneous)T: The last output value of the cell. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • -
-
- -
-
-
-
-

OnnxLabelEncoder#

class skl2onnx.algebra.onnx_ops.OnnxLabelEncoder(*args, **kwargs)#

Version

Onnx name: LabelEncoder

This version of the operator has been available since version 2 of domain ai.onnx.ml.

-

Summary

-

Maps each element in the input tensor to another value.

-

The mapping is determined by the two parallel attributes, ‘keys_*’ and ‘values_*’ attribute. The i-th value in the specified ‘keys_*’ attribute would be mapped to the i-th value in the specified ‘values_*’ attribute. It implies that input’s element type and the element type of the specified ‘keys_*’ should be identical while the output type is identical to the specified ‘values_*’ attribute. If an input element can not be found in the specified ‘keys_*’ attribute, the ‘default_*’ that matches the specified ‘values_*’ attribute may be used as its output value.

-

Let’s consider an example which maps a string tensor to an integer tensor. Assume ‘keys_strings’ is [“Amy”, “Sally”], ‘values_int64s’ is [5, 6], and ‘default_int64’ is ‘-1’. The input [“Dori”, “Amy”, “Amy”, “Sally”, “Sally”] would be mapped to [-1, 5, 5, 6, 6].

-

Since this operator is a one-to-one mapping, its input and output shapes are the same. Notice that only one of ‘keys_*’/’values_*’ can be set.

-

For key look-up, bit-wise comparison is used so even a float NaN can be mapped to a value in ‘values_*’ attribute.

-

Attributes

-
    -
  • default_float: A float. Default value is -name: "default_float" f: -0.0 type: FLOAT

  • -
  • default_int64: An integer. Default value is -name: "default_int64" i: -1 type: INT

  • -
  • default_string: A string. Default value is -name: "default_string" s: "_Unused" type: STRING

  • -
  • -
  • -
  • -
  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: Input data. It can be either tensor or scalar.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: Output data.

  • -
-

Type Constraints

-
    -
  • T1 tensor(string), tensor(int64), tensor(float): The input type is a tensor of any shape.

  • -
  • T2 tensor(string), tensor(int64), tensor(float): Output type is determined by the specified ‘values_*’ attribute.

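A sketch of the Amy/Sally example above, assuming the keys_strings, values_int64s and default_int64 attributes are passed as keyword arguments and that a numpy array of strings is mapped to a string tensor input; all names are illustrative.

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxLabelEncoder

    X = np.array(['Dori', 'Amy', 'Amy', 'Sally', 'Sally'])
    node = OnnxLabelEncoder('X', keys_strings=['Amy', 'Sally'],
                            values_int64s=[5, 6], default_int64=-1,
                            output_names=['Y'])
    onx = node.to_onnx({'X': X})
    sess = InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
    print(sess.run(None, {'X': X})[0])   # [-1  5  5  6  6]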

OnnxLabelEncoder_1#

class skl2onnx.algebra.onnx_ops.OnnxLabelEncoder_1(*args, **kwargs)#

Version

Onnx name: LabelEncoder

This version of the operator has been available since version 1 of domain ai.onnx.ml.

-

Summary

-

Converts strings to integers and vice versa.

-

If the string default value is set, it will convert integers to strings. -If the int default value is set, it will convert strings to integers.

-

Each operator converts either integers to strings or strings to integers, depending -on which default value attribute is provided. Only one default value attribute -should be defined.

-

When converting from integers to strings, the string is fetched from the -‘classes_strings’ list, by simple indexing.

-

When converting from strings to integers, the string is looked up in the list -and the index at which it is found is used as the converted value.

-

Attributes

-
    -
  • -
  • default_int64: An integer to use when an input string value is not found in the map.<br>One and only one of the ‘default_*’ attributes must be defined. Default value is -name: "default_int64" i: -1 type: INT

  • -
  • default_string: A string to use when an input integer value is not found in the map.<br>One and only one of the ‘default_*’ attributes must be defined. Default value is -name: "default_string" s: "_Unused" type: STRING

  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: Input data.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: Output data. If strings are input, the output values are integers, and vice versa.

  • -
-

Type Constraints

-
    -
  • T1 tensor(string), tensor(int64): The input type must be a tensor of integers or strings, of any shape.

  • -
  • T2 tensor(string), tensor(int64): The output type will be a tensor of strings or integers, and will have the same shape as the input.

  • -
-
- -
-
-
-
-

OnnxLabelEncoder_2#

class skl2onnx.algebra.onnx_ops.OnnxLabelEncoder_2(*args, **kwargs)#

Version

Onnx name: LabelEncoder

This version of the operator has been available since version 2 of domain ai.onnx.ml.

-

Summary

-

Maps each element in the input tensor to another value.

-

The mapping is determined by the two parallel attributes, ‘keys_*’ and -‘values_*’ attribute. The i-th value in the specified ‘keys_*’ attribute -would be mapped to the i-th value in the specified ‘values_*’ attribute. It -implies that input’s element type and the element type of the specified -‘keys_*’ should be identical while the output type is identical to the -specified ‘values_*’ attribute. If an input element can not be found in the -specified ‘keys_*’ attribute, the ‘default_*’ that matches the specified -‘values_*’ attribute may be used as its output value.

-

Let’s consider an example which maps a string tensor to an integer tensor. -Assume and ‘keys_strings’ is [“Amy”, “Sally”], ‘values_int64s’ is [5, 6], -and ‘default_int64’ is ‘-1’. The input [“Dori”, “Amy”, “Amy”, “Sally”, -“Sally”] would be mapped to [-1, 5, 5, 6, 6].

-

Since this operator is an one-to-one mapping, its input and output shapes -are the same. Notice that only one of ‘keys_*’/’values_*’ can be set.

-

For key look-up, bit-wise comparison is used so even a float NaN can be -mapped to a value in ‘values_*’ attribute.

-

Attributes

-
    -
  • default_float: A float. Default value is -name: "default_float" f: -0.0 type: FLOAT

  • -
  • default_int64: An integer. Default value is -name: "default_int64" i: -1 type: INT

  • -
  • default_string: A string. Default value is -name: "default_string" s: "_Unused" type: STRING

  • -
  • -
  • -
  • -
  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: Input data. It can be either tensor or scalar.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: Output data.

  • -
-

Type Constraints

-
    -
  • T1 tensor(string), tensor(int64), tensor(float): The input type is a tensor of any shape.

  • -
  • T2 tensor(string), tensor(int64), tensor(float): Output type is determined by the specified ‘values_*’ attribute.

  • -
-
- -
-
-
-
-

OnnxLeakyRelu#

class skl2onnx.algebra.onnx_ops.OnnxLeakyRelu(*args, **kwargs)#

Version

Onnx name: LeakyRelu

This version of the operator has been available since version 16.

-

Summary

-

LeakyRelu takes input data (Tensor<T>) and an argument alpha, and produces one output data (Tensor<T>) where the function f(x) = alpha * x for x < 0, f(x) = x for x >= 0, is applied to the data tensor elementwise.

-

History: Version 16 adds bfloat16 to the types allowed.

-

Attributes

-
    -
  • alpha: Coefficient of leakage. Default value is -name: "alpha" f: 0.009999999776482582 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(bfloat16), tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

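A brief sketch of the piecewise function above; the alpha value and the tensor names are arbitrary choices for the example.

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxLeakyRelu

    X = np.array([-2.0, -0.5, 0.0, 1.5], dtype=np.float32)
    node = OnnxLeakyRelu('X', alpha=0.1, op_version=16, output_names=['Y'])
    onx = node.to_onnx({'X': X})
    sess = InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
    print(sess.run(None, {'X': X})[0])   # [-0.2  -0.05  0.    1.5]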

OnnxLeakyRelu_1#

class skl2onnx.algebra.onnx_ops.OnnxLeakyRelu_1(*args, **kwargs)#

Version

Onnx name: LeakyRelu

This version of the operator has been available since version 1.

-

Summary

-

LeakyRelu takes input data (Tensor<T>) and an argument alpha, and produces one -output data (Tensor<T>) where the function f(x) = alpha * x for x < 0, -f(x) = x for x >= 0, is applied to the data tensor elementwise.

-

Attributes

-
    -
  • alpha: Coefficient of leakage default to 0.01. Default value is -name: "alpha" f: 0.009999999776482582 type: FLOAT

  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxLeakyRelu_16#

class skl2onnx.algebra.onnx_ops.OnnxLeakyRelu_16(*args, **kwargs)#

Version

Onnx name: LeakyRelu

This version of the operator has been available since version 16.

-

Summary

-

LeakyRelu takes input data (Tensor<T>) and an argument alpha, and produces one -output data (Tensor<T>) where the function f(x) = alpha * x for x < 0, -f(x) = x for x >= 0, is applied to the data tensor elementwise.

-

History -- Version 16 adds bfloat16 to the types allowed.

-

Attributes

-
    -
  • alpha: Coefficient of leakage. Default value is -name: "alpha" f: 0.009999999776482582 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(bfloat16), tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxLeakyRelu_6#

class skl2onnx.algebra.onnx_ops.OnnxLeakyRelu_6(*args, **kwargs)#

Version

Onnx name: LeakyRelu

This version of the operator has been available since version 6.

-

Summary

-

LeakyRelu takes input data (Tensor<T>) and an argument alpha, and produces one -output data (Tensor<T>) where the function f(x) = alpha * x for x < 0, -f(x) = x for x >= 0, is applied to the data tensor elementwise.

-

Attributes

-
    -
  • alpha: Coefficient of leakage. Default value is -name: "alpha" f: 0.009999999776482582 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxLess#

class skl2onnx.algebra.onnx_ops.OnnxLess(*args, **kwargs)#

Version

Onnx name: Less

This version of the operator has been available since version 13.

-

Summary

-

Returns the tensor resulting from performing the less logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrains input types to all numeric tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

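A small sketch of the broadcasting behaviour described above, with placeholder tensor names:

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxLess

    A = np.array([[1.0, 5.0, 3.0]], dtype=np.float32)
    B = np.array([2.0], dtype=np.float32)          # broadcast against A
    node = OnnxLess('A', 'B', op_version=13, output_names=['C'])
    onx = node.to_onnx({'A': A, 'B': B})
    sess = InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
    print(sess.run(None, {'A': A, 'B': B})[0])     # [[ True False False]]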

OnnxLessOrEqual#

class skl2onnx.algebra.onnx_ops.OnnxLessOrEqual(*args, **kwargs)#

Version

Onnx name: LessOrEqual

This version of the operator has been available since version 16.

-

Summary

-

Returns the tensor resulting from performing the less_equal logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrains input types to all numeric tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

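The boolean output often has to be cast back to a numeric type before it can feed other operators; the sketch below chains OnnxLessOrEqual with OnnxCast to do so. Tensor names are illustrative only.

    import numpy as np
    from onnx import TensorProto
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxCast, OnnxLessOrEqual

    A = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    B = np.array([2.0, 2.0, 2.0], dtype=np.float32)
    leq = OnnxLessOrEqual('A', 'B', op_version=16)
    # Cast the boolean mask to float so downstream numeric nodes can use it.
    node = OnnxCast(leq, to=TensorProto.FLOAT, op_version=16,
                    output_names=['C'])
    onx = node.to_onnx({'A': A, 'B': B})
    sess = InferenceSession(onx.SerializeToString(),
                            providers=['CPUExecutionProvider'])
    print(sess.run(None, {'A': A, 'B': B})[0])     # [1. 1. 0.]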

OnnxLessOrEqual_12#

class skl2onnx.algebra.onnx_ops.OnnxLessOrEqual_12(*args, **kwargs)#

Version

Onnx name: LessOrEqual

This version of the operator has been available since version 12.

-

Summary

-

Returns the tensor resulted from performing the less_equal logical operation -elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input types to all numeric tensors.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxLessOrEqual_16#

class skl2onnx.algebra.onnx_ops.OnnxLessOrEqual_16(*args, **kwargs)#

Version

Onnx name: LessOrEqual

This version of the operator has been available since version 16.

Summary

Returns the tensor resulting from performing the less_equal logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous) T: First input operand for the logical operator.
  • B (heterogeneous) T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous) T1: Result tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrains input types to all numeric tensors.
  • T1 tensor(bool): Constrains output to boolean tensor.

OnnxLess_1#

class skl2onnx.algebra.onnx_ops.OnnxLess_1(*args, **kwargs)#

Version

Onnx name: Less

This version of the operator has been available since version 1.

Summary

Returns the tensor resulting from performing the less logical operation elementwise on the input tensors A and B.

If broadcasting is enabled, the right-hand-side argument will be broadcast to match the shape of the left-hand-side argument. See the doc of Add for a detailed description of the broadcasting rules.

Attributes

  • broadcast: Enable broadcasting. Default value is name: "broadcast" i: 0 type: INT

Inputs

  • A (heterogeneous) T: Left input tensor for the logical operator.
  • B (heterogeneous) T: Right input tensor for the logical operator.

Outputs

  • C (heterogeneous) T1: Result tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrains input to float tensors.
  • T1 tensor(bool): Constrains output to boolean tensor.

OnnxLess_13#

class skl2onnx.algebra.onnx_ops.OnnxLess_13(*args, **kwargs)#

Version

Onnx name: Less

This version of the operator has been available since version 13.

Summary

Returns the tensor resulting from performing the less logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous) T: First input operand for the logical operator.
  • B (heterogeneous) T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous) T1: Result tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrains input types to all numeric tensors.
  • T1 tensor(bool): Constrains output to boolean tensor.

OnnxLess_7#

class skl2onnx.algebra.onnx_ops.OnnxLess_7(*args, **kwargs)#

Version

Onnx name: Less

This version of the operator has been available since version 7.

Summary

Returns the tensor resulting from performing the less logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous) T: First input operand for the logical operator.
  • B (heterogeneous) T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous) T1: Result tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrains input to float tensors.
  • T1 tensor(bool): Constrains output to boolean tensor.

OnnxLess_9#

class skl2onnx.algebra.onnx_ops.OnnxLess_9(*args, **kwargs)#

Version

Onnx name: Less

This version of the operator has been available since version 9.

Summary

Returns the tensor resulting from performing the less logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous) T: First input operand for the logical operator.
  • B (heterogeneous) T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous) T1: Result tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input types to all numeric tensors.
  • T1 tensor(bool): Constrains output to boolean tensor.

OnnxLinearClassifier#

class skl2onnx.algebra.onnx_ops.OnnxLinearClassifier(*args, **kwargs)#

Version

Onnx name: LinearClassifier

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Linear classifier

Attributes

  • multi_class: Indicates whether to do OvR or multinomial (0=OvR is the default). Default value is name: "multi_class" i: 0 type: INT
  • post_transform: Indicates the transform to apply to the scores vector. One of 'NONE', 'SOFTMAX', 'LOGISTIC', 'SOFTMAX_ZERO', or 'PROBIT'. Default value is name: "post_transform" s: "NONE" type: STRING

Inputs

  • X (heterogeneous) T1: Data to be classified.

Outputs

  • Y (heterogeneous) T2: Classification outputs (one class per example).
  • Z (heterogeneous) tensor(float): Classification scores ([N,E] - one score for each class and example).

Type Constraints

  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type, and of shape [N,C] or [C]. In the latter case, it will be treated as [1,C].
  • T2 tensor(string), tensor(int64): The output will be a tensor of strings or integers.
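To make the scoring rule concrete, here is a small numpy sketch of what a linear classifier of this shape computes: per-class scores followed by an optional post transform. The [E, C] weight layout, the bias vector and the argmax label rule are illustrative assumptions for the sketch, not values quoted from the operator definition above.

    import numpy as np

    def linear_classifier(X, W, b, post_transform="NONE"):
        """X: [N, C] inputs, W: [E, C] per-class weights, b: [E] intercepts (assumed layout)."""
        Z = X @ W.T + b                      # raw scores, shape [N, E]
        if post_transform == "SOFTMAX":
            e = np.exp(Z - Z.max(axis=1, keepdims=True))
            Z = e / e.sum(axis=1, keepdims=True)
        elif post_transform == "LOGISTIC":
            Z = 1.0 / (1.0 + np.exp(-Z))
        Y = Z.argmax(axis=1)                 # predicted class index per example
        return Y, Z

    X = np.array([[1.0, 2.0], [0.5, -1.0]], dtype=np.float32)
    W = np.array([[0.2, -0.1], [0.3, 0.4]], dtype=np.float32)   # 2 classes, 2 features
    b = np.array([0.0, -0.5], dtype=np.float32)
    print(linear_classifier(X, W, b, post_transform="SOFTMAX"))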

OnnxLinearClassifier_1#

class skl2onnx.algebra.onnx_ops.OnnxLinearClassifier_1(*args, **kwargs)#

Version

Onnx name: LinearClassifier

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Linear classifier

Attributes

  • multi_class: Indicates whether to do OvR or multinomial (0=OvR is the default). Default value is name: "multi_class" i: 0 type: INT
  • post_transform: Indicates the transform to apply to the scores vector. One of 'NONE', 'SOFTMAX', 'LOGISTIC', 'SOFTMAX_ZERO', or 'PROBIT'. Default value is name: "post_transform" s: "NONE" type: STRING

Inputs

  • X (heterogeneous) T1: Data to be classified.

Outputs

  • Y (heterogeneous) T2: Classification outputs (one class per example).
  • Z (heterogeneous) tensor(float): Classification scores ([N,E] - one score for each class and example).

Type Constraints

  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type, and of shape [N,C] or [C]. In the latter case, it will be treated as [1,C].
  • T2 tensor(string), tensor(int64): The output will be a tensor of strings or integers.

OnnxLinearRegressor#

class skl2onnx.algebra.onnx_ops.OnnxLinearRegressor(*args, **kwargs)#

Version

Onnx name: LinearRegressor

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Generalized linear regression evaluation.

If targets is set to 1 (default) then univariate regression is performed.

If targets is set to M then M sets of coefficients must be passed in as a sequence and M results will be output for each input n in N.

The coefficients array is of length n, and the coefficients for each target are contiguous. Intercepts are optional but if provided must match the number of targets.

Attributes

  • post_transform: Indicates the transform to apply to the regression output vector. One of 'NONE', 'SOFTMAX', 'LOGISTIC', 'SOFTMAX_ZERO', or 'PROBIT'. Default value is name: "post_transform" s: "NONE" type: STRING
  • targets: The total number of regression targets, 1 if not defined. Default value is name: "targets" i: 1 type: INT

Inputs

  • X (heterogeneous) T: Data to be regressed.

Outputs

  • Y (heterogeneous) tensor(float): Regression outputs (one per target, per example).

Type Constraints

  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type.
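The contiguous-per-target layout of the coefficients described above can be emulated in a few lines of numpy. This is a sketch under assumed shapes (a flat coefficient list of length C * targets), not the runtime implementation:

    import numpy as np

    def linear_regressor(X, coefficients, intercepts=None, targets=1):
        """X: [N, C]; coefficients: flat list of length C * targets, one contiguous block per target."""
        N, C = X.shape
        W = np.asarray(coefficients, dtype=np.float32).reshape(targets, C)  # one row per target
        Y = X @ W.T                                                          # shape [N, targets]
        if intercepts is not None:
            Y = Y + np.asarray(intercepts, dtype=np.float32)
        return Y

    X = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)
    # two targets over three features: coefficients of target 0 first, then target 1
    print(linear_regressor(X, [0.1, 0.2, 0.3, 1.0, 0.0, -1.0], intercepts=[0.5, -0.5], targets=2))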

OnnxLinearRegressor_1#

class skl2onnx.algebra.onnx_ops.OnnxLinearRegressor_1(*args, **kwargs)#

Version

Onnx name: LinearRegressor

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Generalized linear regression evaluation.

If targets is set to 1 (default) then univariate regression is performed.

If targets is set to M then M sets of coefficients must be passed in as a sequence and M results will be output for each input n in N.

The coefficients array is of length n, and the coefficients for each target are contiguous. Intercepts are optional but if provided must match the number of targets.

Attributes

  • post_transform: Indicates the transform to apply to the regression output vector. One of 'NONE', 'SOFTMAX', 'LOGISTIC', 'SOFTMAX_ZERO', or 'PROBIT'. Default value is name: "post_transform" s: "NONE" type: STRING
  • targets: The total number of regression targets, 1 if not defined. Default value is name: "targets" i: 1 type: INT

Inputs

  • X (heterogeneous) T: Data to be regressed.

Outputs

  • Y (heterogeneous) tensor(float): Regression outputs (one per target, per example).

Type Constraints

  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type.

OnnxLog#

class skl2onnx.algebra.onnx_ops.OnnxLog(*args, **kwargs)#

Version

Onnx name: Log

This version of the operator has been available since version 13.

Summary

Calculates the natural log of the given input tensor, element-wise.

Inputs

  • input (heterogeneous) T: Input tensor

Outputs

  • output (heterogeneous) T: The natural log of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

OnnxLogSoftmax#

class skl2onnx.algebra.onnx_ops.OnnxLogSoftmax(*args, **kwargs)#

Version

Onnx name: LogSoftmax

This version of the operator has been available since version 13.

Summary

The operator computes the log of softmax values for the given input:

    LogSoftmax(input, axis) = Log(Softmax(input, axis=axis))

The "axis" attribute indicates the dimension along which LogSoftmax will be performed. The output tensor has the same shape and contains the LogSoftmax values of the corresponding input.

Attributes

  • axis: Describes the dimension LogSoftmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: -1 type: INT

Inputs

  • input (heterogeneous) T: The input tensor of rank >= axis.

Outputs

  • output (heterogeneous) T: The output values with the same shape as the input tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.
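The identity LogSoftmax(input, axis) = Log(Softmax(input, axis)) can be checked directly in numpy; the helper below uses the numerically stable form that subtracts the per-axis maximum first (a sketch for illustration, not the runtime kernel):

    import numpy as np

    def log_softmax(x, axis=-1):
        # log(softmax(x)) = (x - m) - log(sum(exp(x - m))) with m = max along axis
        m = x.max(axis=axis, keepdims=True)
        shifted = x - m
        return shifted - np.log(np.exp(shifted).sum(axis=axis, keepdims=True))

    x = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)
    print(log_softmax(x))                         # same shape as the input
    print(np.log(np.exp(x) / np.exp(x).sum()))    # naive Log(Softmax(x)), same values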

OnnxLogSoftmax_1#

class skl2onnx.algebra.onnx_ops.OnnxLogSoftmax_1(*args, **kwargs)#

Version

Onnx name: LogSoftmax

This version of the operator has been available since version 1.

Summary

The operator computes the logsoftmax (log of softmax) values for each layer in the batch of the given input. The input is a 2-D tensor (Tensor<float>) of size (batch_size x input_feature_dimensions). The output tensor has the same shape and contains the logsoftmax values of the corresponding input.

Input does not need to explicitly be a 2D vector; rather, it will be coerced into one. For an arbitrary n-dimensional tensor input in [a_0, a_1, …, a_{k-1}, a_k, …, a_{n-1}], where k is the axis provided, the input will be coerced into a 2-dimensional tensor with dimensions [a_0 * … * a_{k-1}, a_k * … * a_{n-1}]. For the default case where axis=1, this means the input tensor will be coerced into a 2D tensor of dimensions [a_0, a_1 * … * a_{n-1}], where a_0 is often the batch size. In this situation, we must have a_0 = N and a_1 * … * a_{n-1} = D. Each of these dimensions must be matched correctly, or else the operator will throw errors.

Attributes

  • axis: Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Default value is name: "axis" i: 1 type: INT

Inputs

  • input (heterogeneous) T: The input tensor that's coerced into a 2D matrix of size (NxD) as described above.

Outputs

  • output (heterogeneous) T: The output values with the same shape as the input tensor (the original size without coercion).

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLogSoftmax_11#

class skl2onnx.algebra.onnx_ops.OnnxLogSoftmax_11(*args, **kwargs)#

Version

Onnx name: LogSoftmax

This version of the operator has been available since version 11.

Summary

The operator computes the logsoftmax (log of softmax) values for each layer in the batch of the given input.

The input does not need to explicitly be a 2D vector; rather, it will be coerced into one. For an arbitrary n-dimensional tensor input in [a_0, a_1, …, a_{k-1}, a_k, …, a_{n-1}], where k is the axis provided, the input will be coerced into a 2-dimensional tensor with dimensions [a_0 * … * a_{k-1}, a_k * … * a_{n-1}]. For the default case where axis=1, this means the input tensor will be coerced into a 2D tensor of dimensions [a_0, a_1 * … * a_{n-1}], where a_0 is often the batch size. In this situation, we must have a_0 = N and a_1 * … * a_{n-1} = D. Each of these dimensions must be matched correctly, or else the operator will throw errors. The output tensor has the same shape and contains the logsoftmax values of the corresponding input.

Attributes

  • axis: Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: 1 type: INT

Inputs

  • input (heterogeneous) T: The input tensor that's coerced into a 2D matrix of size (NxD) as described above.

Outputs

  • output (heterogeneous) T: The output values with the same shape as the input tensor (the original size without coercion).

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLogSoftmax_13#

class skl2onnx.algebra.onnx_ops.OnnxLogSoftmax_13(*args, **kwargs)#

Version

Onnx name: LogSoftmax

This version of the operator has been available since version 13.

Summary

The operator computes the log of softmax values for the given input:

    LogSoftmax(input, axis) = Log(Softmax(input, axis=axis))

The "axis" attribute indicates the dimension along which LogSoftmax will be performed. The output tensor has the same shape and contains the LogSoftmax values of the corresponding input.

Attributes

  • axis: Describes the dimension LogSoftmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: -1 type: INT

Inputs

  • input (heterogeneous) T: The input tensor of rank >= axis.

Outputs

  • output (heterogeneous) T: The output values with the same shape as the input tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

OnnxLog_1#

class skl2onnx.algebra.onnx_ops.OnnxLog_1(*args, **kwargs)#

Version

Onnx name: Log

This version of the operator has been available since version 1.

Summary

Calculates the natural log of the given input tensor, element-wise.

Inputs

  • input (heterogeneous) T: Input tensor

Outputs

  • output (heterogeneous) T: The natural log of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLog_13#

class skl2onnx.algebra.onnx_ops.OnnxLog_13(*args, **kwargs)#

Version

Onnx name: Log

This version of the operator has been available since version 13.

Summary

Calculates the natural log of the given input tensor, element-wise.

Inputs

  • input (heterogeneous) T: Input tensor

Outputs

  • output (heterogeneous) T: The natural log of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

OnnxLog_6#

class skl2onnx.algebra.onnx_ops.OnnxLog_6(*args, **kwargs)#

Version

Onnx name: Log

This version of the operator has been available since version 6.

Summary

Calculates the natural log of the given input tensor, element-wise.

Inputs

  • input (heterogeneous) T: Input tensor

Outputs

  • output (heterogeneous) T: The natural log of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLoop#

class skl2onnx.algebra.onnx_ops.OnnxLoop(*args, **kwargs)#

Version

Onnx name: Loop

This version of the operator has been available since version 16.

Summary

Generic Looping construct. This loop has multiple termination conditions:

  1. Trip count. Iteration count specified at runtime. Set by specifying the input M. Optional. Set to empty string to omit. Note that a static trip count (specified at graph construction time) can be specified by passing in a constant node for input M.
  2. Loop termination condition. This is an input to the op that determines whether to run the first iteration and also a loop-carried dependency for the body graph. The body graph must yield a value for the condition variable, whether this input is provided or not.

This table summarizes the operating modes of this operator with equivalent C-style code:

Operator inputs defined as (max_trip_count, condition_var).

    input ("", ""):
        for (int i=0; ; ++i) {
            cond = ...; // Note this value is ignored, but is required in the body
        }

    input ("", cond) // Note this is analogous to a while loop
        bool cond = ...;
        for (int i=0; cond; ++i) {
            cond = ...;
        }

    input ("", 1) // Note this is analogous to a do-while loop
        bool cond = true;
        for (int i=0; cond; ++i) {
            cond = ...;
        }

    input (trip_count, "") // Note this is analogous to a for loop
        int trip_count = ...;
        for (int i=0; i < trip_count; ++i) {
            cond = ...; // ignored
        }

    input (trip_count, cond)
        int trip_count = ...;
        bool cond = ...;
        for (int i=0; i < trip_count && cond; ++i) {
            cond = ...;
        }

Sample usage - cond as well as trip count

    graph predict-net {
        %a = Constant[value = <Scalar Tensor [3]>]()
        %b = Constant[value = <Scalar Tensor [6]>]()
        %keepgoing = Constant[value = <Scalar Tensor [1]>]()
        %max_trip_count = Constant[value = <Scalar Tensor [10]>]()
        %keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b)
        return
    }

    graph body-net (
        %i[INT32, scalar]           // iteration number
        %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used
        %b_in[INT32, scalar]        // incoming value of loop-carried-dependency b
    ) {
        %my_local = Add(%a, %b_in)
        %b_out = Sub(%a, %b_in)                     // outgoing value of loop-carried-dependency b
        %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition
        %user_defined_val = Add(%b_in, %b_in)       // scan-output value to be accumulated
        return %keepgoing_out, %b_out, %user_defined_val
    }

Sample equivalent C code

    {
        /* User-defined code (enclosing scope) */
        int a = 3, b = 6;
        bool keepgoing = true; // Analogous to input cond
        /* End user-defined code */

        /* Implicitly-defined code */
        const int max_trip_count = 10; // Analogous to input M
        int user_defined_vals[]; // Imagine this is resizable
        /* End implicitly-defined code */
        /* Initialize loop-carried variables and scan-output variables */
        bool keepgoing_out = keepgoing;
        int b_out = b;

        for (int i=0; i < max_trip_count && keepgoing_out; ++i) {
            /* Implicitly-defined code: bind actual parameter values
               to formal parameter variables of loop-body */
            bool keepgoing_in = keepgoing_out;
            int b_in = b_out;

            /* User-defined code (loop body) */
            int my_local = a + b_in; // Reading value "a" from the enclosing scope is fine
            b_out = a - b_in;
            keepgoing_out = my_local > b_out;
            user_defined_val = b_in + b_in; // b_in and b_out are different variables
            /* End user-defined code */

            /* Implicitly-defined code */
            user_defined_vals[i] = user_defined_val; // accumulate scan-output values
        }
        // int t = my_local; // Can't do this. my_local is not accessible here.

        // The values below are bound to the output variables of the loop and therefore accessible:
        // b_out; user_defined_vals; keepgoing_out;
    }

There are several things of note in this code snippet:

  1. Values from the enclosing scope (i.e. variable "a" here) are in scope and can be referenced in the inputs of the loop.
  2. Any values computed in the loop body that need to be used in a subsequent iteration or after the loop are modelled using a pair of variables in the loop-body, consisting of an input variable (e.g., b_in) and an output variable (e.g., b_out). These are referred to as loop-carried dependences. The loop operation node supplies the input value of the input variable for the first iteration, and returns the output value of the output variable produced by the final iteration.
  3. Scan_output variables are used to implicitly concatenate values computed across all the iterations. In the above example, the values of user_defined_val computed over all iterations are concatenated and returned as the value of user_defined_vals after the loop.
  4. Values created in the body cannot be accessed in the enclosing scope, except using the mechanism described above.

Note that the semantics of this op support "diagonal" or "wavefront" execution. (See Step 3 here for an example: https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). Frontends should emit multi-layer RNNs as a series of While operators (with time being the inner looping dimension), with each successive layer consuming the scan_outputs from the previous layer, possibly going through several point-wise operators (e.g. dropout, residual connections, linear layer).

The inputs/outputs of the subgraph (produced by the loop node) are matched based on order instead of name. The implementation will figure out the names based on this order.

Inputs

Between 2 and 2147483647 inputs.

  • M (optional, heterogeneous) I: A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip.
  • cond (optional, heterogeneous) B: A boolean termination condition. Optional. Pass empty string to skip.
  • v_initial (variadic) V: The initial values of any loop-carried dependencies (values that change across loop iterations)

Outputs

Between 1 and 2147483647 outputs.

  • v_final_and_scan_outputs (variadic) V: Final N loop carried dependency values then K scan_outputs. Scan outputs must be Tensors.

Type Constraints

  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(bfloat16)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)), optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(bfloat16))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(bfloat16)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types
  • I tensor(int64): tensor of int64, which should be a scalar.
  • B tensor(bool): tensor of bool, which should be a scalar.
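To connect the C-style description with something executable, the short Python sketch below emulates the Loop contract for the sample body-net above: an optional trip count, a termination condition, one loop-carried dependency (b) and one scan output. It is plain Python for illustration, not how the runtime executes the subgraph.

    def run_loop(max_trip_count, keepgoing, b, a=3):
        """Emulate Loop semantics for the sample body: b_out = a - b_in, scan output b_in + b_in."""
        keepgoing_out, b_out = keepgoing, b
        user_defined_vals = []                 # scan output, one entry per iteration
        i = 0
        while (max_trip_count is None or i < max_trip_count) and keepgoing_out:
            keepgoing_in, b_in = keepgoing_out, b_out      # bind loop-carried dependencies
            my_local = a + b_in
            b_out = a - b_in
            keepgoing_out = my_local > b_out
            user_defined_vals.append(b_in + b_in)          # accumulated across iterations
            i += 1
        return keepgoing_out, b_out, user_defined_vals

    print(run_loop(10, True, 6))   # sample values a=3, b=6, trip count 10 -> stops after 2 iterations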

OnnxLoop_1#

class skl2onnx.algebra.onnx_ops.OnnxLoop_1(*args, **kwargs)#

Version

Onnx name: Loop

This version of the operator has been available since version 1.

Summary

Generic Looping construct. This loop has multiple termination conditions:

  1. Trip count. Iteration count specified at runtime. Set by specifying the input M. Optional. Set to empty string to omit. Note that a static trip count (specified at graph construction time) can be specified by passing in a constant node for input M.
  2. Loop termination condition. This is an input to the op that determines whether to run the first iteration and also a loop-carried dependency for the body graph. The body graph must yield a value for the condition variable, whether this input is provided or not.

This table summarizes the operating modes of this operator with equivalent C-style code:

Operator inputs defined as (max_trip_count, condition_var).

    input ("", ""):
        for (int i=0; ; ++i) {
            cond = ...; // Note this value is ignored, but is required in the body
        }

    input ("", cond) // Note this is analogous to a while loop
        bool cond = ...;
        for (int i=0; cond; ++i) {
            cond = ...;
        }

    input ("", 1) // Note this is analogous to a do-while loop
        bool cond = true;
        for (int i=0; cond; ++i) {
            cond = ...;
        }

    input (trip_count, "") // Note this is analogous to a for loop
        int trip_count = ...;
        for (int i=0; i < trip_count; ++i) {
            cond = ...; // ignored
        }

    input (trip_count, cond)
        int trip_count = ...;
        bool cond = ...;
        for (int i=0; i < trip_count && cond; ++i) {
            cond = ...;
        }

Sample usage - cond as well as trip count

    graph predict-net {
        %a = Constant[value = <Scalar Tensor [3]>]()
        %b = Constant[value = <Scalar Tensor [6]>]()
        %keepgoing = Constant[value = <Scalar Tensor [1]>]()
        %max_trip_count = Constant[value = <Scalar Tensor [10]>]()
        %keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b)
        return
    }

    graph body-net (
        %i[INT32, scalar]
        %keepgoing[BOOL, scalar]
        %b[INT32, scalar]
    ) {
        %my_local = Add(%a, %b)
        %b_out = Sub(%a, %b)
        %keepgoing_out = Greater(%my_local, %b_out)
        %user_defined_vals = Add(%b, %b)
        return %keepgoing_out, %b_out, %user_defined_vals
    }

Sample equivalent C code

    {
        /* User-defined code (enclosing scope) */
        int a = 3, b = 6;
        bool keepgoing = true; // Analogous to input cond
        /* End user-defined code */

        /* Implicitly-defined code */
        const int max_trip_count = 10; // Analogous to input M
        int user_defined_vals[]; // Imagine this is resizable
        /* End implicitly-defined code */
        for (int i=0; i < max_trip_count && keepgoing; ++i) {
            /* User-defined code (loop body) */
            int my_local = a + b; // Reading values in the enclosing scope is fine
            b = a - b;            // writes fine if we specify b as a loop-carried dependency
            keepgoing = my_local > b; // keepgoing is a loop-carried dependency
            user_defined_vals[i] = b + b;
            /* End user-defined code */
        }
        // my_local = 123; // Can't do this. my_local was defined in the body

        // These below values are live-out from the loop and therefore accessible
        b_out; user_defined_vals; keepgoing_out;
    }

There are several things of note in this code snippet:

  1. Values from the enclosing scope (i.e. variable a here) are in scope and can be referenced in the inputs of the loop.
  2. Any variables which you wish to make available in the enclosing scope (i.e. the variables b and keepgoing) must be declared as either loop-carried dependencies (both at the op inputs and output and at the body net input and output) or scan_outputs.
  3. Values created in the body cannot be accessed in the enclosing scope.

Note that the semantics of this op support "diagonal" or "wavefront" execution. (See Step 3 here for an example: https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). Frontends should emit multi-layer RNNs as a series of While operators (with time being the inner looping dimension), with each successive layer consuming the scan_outputs from the previous layer, possibly going through several point-wise operators (e.g. dropout, residual connections, linear layer).

Inputs

Between 3 and 2147483647 inputs.

  • M (optional, heterogeneous) I: A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip.
  • cond (optional, heterogeneous) B: A boolean termination condition. Optional. Pass empty string to skip.
  • v_initial (variadic) V: The initial values of any loop-carried dependencies (values that change across loop iterations)

Outputs

Between 1 and 2147483647 outputs.

  • v_final_and_scan_outputs (variadic) V: Final N loop carried dependency values then K scan_outputs

Type Constraints

  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types
  • I tensor(int64): tensor of int64, which should be a scalar.
  • B tensor(bool): tensor of bool, which should be a scalar.

OnnxLoop_11#

class skl2onnx.algebra.onnx_ops.OnnxLoop_11(*args, **kwargs)#

Version

Onnx name: Loop

This version of the operator has been available since version 11.

Summary

Generic Looping construct. This loop has multiple termination conditions:

  1. Trip count. Iteration count specified at runtime. Set by specifying the input M. Optional. Set to empty string to omit. Note that a static trip count (specified at graph construction time) can be specified by passing in a constant node for input M.
  2. Loop termination condition. This is an input to the op that determines whether to run the first iteration and also a loop-carried dependency for the body graph. The body graph must yield a value for the condition variable, whether this input is provided or not.

This table summarizes the operating modes of this operator with equivalent C-style code:

Operator inputs defined as (max_trip_count, condition_var).

    input ("", ""):
        for (int i=0; ; ++i) {
            cond = ...; // Note this value is ignored, but is required in the body
        }

    input ("", cond) // Note this is analogous to a while loop
        bool cond = ...;
        for (int i=0; cond; ++i) {
            cond = ...;
        }

    input ("", 1) // Note this is analogous to a do-while loop
        bool cond = true;
        for (int i=0; cond; ++i) {
            cond = ...;
        }

    input (trip_count, "") // Note this is analogous to a for loop
        int trip_count = ...;
        for (int i=0; i < trip_count; ++i) {
            cond = ...; // ignored
        }

    input (trip_count, cond)
        int trip_count = ...;
        bool cond = ...;
        for (int i=0; i < trip_count && cond; ++i) {
            cond = ...;
        }

Sample usage - cond as well as trip count

    graph predict-net {
        %a = Constant[value = <Scalar Tensor [3]>]()
        %b = Constant[value = <Scalar Tensor [6]>]()
        %keepgoing = Constant[value = <Scalar Tensor [1]>]()
        %max_trip_count = Constant[value = <Scalar Tensor [10]>]()
        %keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b)
        return
    }

    graph body-net (
        %i[INT32, scalar]           // iteration number
        %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used
        %b_in[INT32, scalar]        // incoming value of loop-carried-dependency b
    ) {
        %my_local = Add(%a, %b_in)
        %b_out = Sub(%a, %b_in)                     // outgoing value of loop-carried-dependency b
        %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition
        %user_defined_val = Add(%b_in, %b_in)       // scan-output value to be accumulated
        return %keepgoing_out, %b_out, %user_defined_val
    }

Sample equivalent C code

    {
        /* User-defined code (enclosing scope) */
        int a = 3, b = 6;
        bool keepgoing = true; // Analogous to input cond
        /* End user-defined code */

        /* Implicitly-defined code */
        const int max_trip_count = 10; // Analogous to input M
        int user_defined_vals[]; // Imagine this is resizable
        /* End implicitly-defined code */
        /* Initialize loop-carried variables and scan-output variables */
        bool keepgoing_out = keepgoing;
        int b_out = b;

        for (int i=0; i < max_trip_count && keepgoing_out; ++i) {
            /* Implicitly-defined code: bind actual parameter values
               to formal parameter variables of loop-body */
            bool keepgoing_in = keepgoing_out;
            int b_in = b_out;

            /* User-defined code (loop body) */
            int my_local = a + b_in; // Reading value "a" from the enclosing scope is fine
            b_out = a - b_in;
            keepgoing_out = my_local > b_out;
            user_defined_val = b_in + b_in; // b_in and b_out are different variables
            /* End user-defined code */

            /* Implicitly-defined code */
            user_defined_vals[i] = user_defined_val; // accumulate scan-output values
        }
        // int t = my_local; // Can't do this. my_local is not accessible here.

        // The values below are bound to the output variables of the loop and therefore accessible:
        // b_out; user_defined_vals; keepgoing_out;
    }

There are several things of note in this code snippet:

  1. Values from the enclosing scope (i.e. variable "a" here) are in scope and can be referenced in the inputs of the loop.
  2. Any values computed in the loop body that need to be used in a subsequent iteration or after the loop are modelled using a pair of variables in the loop-body, consisting of an input variable (e.g., b_in) and an output variable (e.g., b_out). These are referred to as loop-carried dependences. The loop operation node supplies the input value of the input variable for the first iteration, and returns the output value of the output variable produced by the final iteration.
  3. Scan_output variables are used to implicitly concatenate values computed across all the iterations. In the above example, the values of user_defined_val computed over all iterations are concatenated and returned as the value of user_defined_vals after the loop.
  4. Values created in the body cannot be accessed in the enclosing scope, except using the mechanism described above.

Note that the semantics of this op support "diagonal" or "wavefront" execution. (See Step 3 here for an example: https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). Frontends should emit multi-layer RNNs as a series of While operators (with time being the inner looping dimension), with each successive layer consuming the scan_outputs from the previous layer, possibly going through several point-wise operators (e.g. dropout, residual connections, linear layer).

Inputs

Between 2 and 2147483647 inputs.

  • M (optional, heterogeneous) I: A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip.
  • cond (optional, heterogeneous) B: A boolean termination condition. Optional. Pass empty string to skip.
  • v_initial (variadic) V: The initial values of any loop-carried dependencies (values that change across loop iterations)

Outputs

Between 1 and 2147483647 outputs.

  • v_final_and_scan_outputs (variadic) V: Final N loop carried dependency values then K scan_outputs

Type Constraints

  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types
  • I tensor(int64): tensor of int64, which should be a scalar.
  • B tensor(bool): tensor of bool, which should be a scalar.

OnnxLoop_13#

class skl2onnx.algebra.onnx_ops.OnnxLoop_13(*args, **kwargs)#

Version

Onnx name: Loop

This version of the operator has been available since version 13.

Summary

Generic Looping construct. This loop has multiple termination conditions:

  1. Trip count. Iteration count specified at runtime. Set by specifying the input M. Optional. Set to empty string to omit. Note that a static trip count (specified at graph construction time) can be specified by passing in a constant node for input M.
  2. Loop termination condition. This is an input to the op that determines whether to run the first iteration and also a loop-carried dependency for the body graph. The body graph must yield a value for the condition variable, whether this input is provided or not.

This table summarizes the operating modes of this operator with equivalent C-style code:

Operator inputs defined as (max_trip_count, condition_var).

    input ("", ""):
        for (int i=0; ; ++i) {
            cond = ...; // Note this value is ignored, but is required in the body
        }

    input ("", cond) // Note this is analogous to a while loop
        bool cond = ...;
        for (int i=0; cond; ++i) {
            cond = ...;
        }

    input ("", 1) // Note this is analogous to a do-while loop
        bool cond = true;
        for (int i=0; cond; ++i) {
            cond = ...;
        }

    input (trip_count, "") // Note this is analogous to a for loop
        int trip_count = ...;
        for (int i=0; i < trip_count; ++i) {
            cond = ...; // ignored
        }

    input (trip_count, cond)
        int trip_count = ...;
        bool cond = ...;
        for (int i=0; i < trip_count && cond; ++i) {
            cond = ...;
        }

Sample usage - cond as well as trip count

    graph predict-net {
        %a = Constant[value = <Scalar Tensor [3]>]()
        %b = Constant[value = <Scalar Tensor [6]>]()
        %keepgoing = Constant[value = <Scalar Tensor [1]>]()
        %max_trip_count = Constant[value = <Scalar Tensor [10]>]()
        %keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b)
        return
    }

    graph body-net (
        %i[INT32, scalar]           // iteration number
        %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used
        %b_in[INT32, scalar]        // incoming value of loop-carried-dependency b
    ) {
        %my_local = Add(%a, %b_in)
        %b_out = Sub(%a, %b_in)                     // outgoing value of loop-carried-dependency b
        %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition
        %user_defined_val = Add(%b_in, %b_in)       // scan-output value to be accumulated
        return %keepgoing_out, %b_out, %user_defined_val
    }

Sample equivalent C code

    {
        /* User-defined code (enclosing scope) */
        int a = 3, b = 6;
        bool keepgoing = true; // Analogous to input cond
        /* End user-defined code */

        /* Implicitly-defined code */
        const int max_trip_count = 10; // Analogous to input M
        int user_defined_vals[]; // Imagine this is resizable
        /* End implicitly-defined code */
        /* Initialize loop-carried variables and scan-output variables */
        bool keepgoing_out = keepgoing;
        int b_out = b;

        for (int i=0; i < max_trip_count && keepgoing_out; ++i) {
            /* Implicitly-defined code: bind actual parameter values
               to formal parameter variables of loop-body */
            bool keepgoing_in = keepgoing_out;
            int b_in = b_out;

            /* User-defined code (loop body) */
            int my_local = a + b_in; // Reading value "a" from the enclosing scope is fine
            b_out = a - b_in;
            keepgoing_out = my_local > b_out;
            user_defined_val = b_in + b_in; // b_in and b_out are different variables
            /* End user-defined code */

            /* Implicitly-defined code */
            user_defined_vals[i] = user_defined_val; // accumulate scan-output values
        }
        // int t = my_local; // Can't do this. my_local is not accessible here.

        // The values below are bound to the output variables of the loop and therefore accessible:
        // b_out; user_defined_vals; keepgoing_out;
    }

There are several things of note in this code snippet:

  1. Values from the enclosing scope (i.e. variable "a" here) are in scope and can be referenced in the inputs of the loop.
  2. Any values computed in the loop body that need to be used in a subsequent iteration or after the loop are modelled using a pair of variables in the loop-body, consisting of an input variable (e.g., b_in) and an output variable (e.g., b_out). These are referred to as loop-carried dependences. The loop operation node supplies the input value of the input variable for the first iteration, and returns the output value of the output variable produced by the final iteration.
  3. Scan_output variables are used to implicitly concatenate values computed across all the iterations. In the above example, the values of user_defined_val computed over all iterations are concatenated and returned as the value of user_defined_vals after the loop.
  4. Values created in the body cannot be accessed in the enclosing scope, except using the mechanism described above.

Note that the semantics of this op support "diagonal" or "wavefront" execution. (See Step 3 here for an example: https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). Frontends should emit multi-layer RNNs as a series of While operators (with time being the inner looping dimension), with each successive layer consuming the scan_outputs from the previous layer, possibly going through several point-wise operators (e.g. dropout, residual connections, linear layer).

The inputs/outputs of the subgraph (produced by the loop node) are matched based on order instead of name. The implementation will figure out the names based on this order.

Inputs

Between 2 and 2147483647 inputs.

  • M (optional, heterogeneous) I: A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip.
  • cond (optional, heterogeneous) B: A boolean termination condition. Optional. Pass empty string to skip.
  • v_initial (variadic) V: The initial values of any loop-carried dependencies (values that change across loop iterations)

Outputs

Between 1 and 2147483647 outputs.

  • v_final_and_scan_outputs (variadic) V: Final N loop carried dependency values then K scan_outputs. Scan outputs must be Tensors.

Type Constraints

  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): All Tensor and Sequence types
  • I tensor(int64): tensor of int64, which should be a scalar.
  • B tensor(bool): tensor of bool, which should be a scalar.

OnnxLoop_16#

class skl2onnx.algebra.onnx_ops.OnnxLoop_16(*args, **kwargs)#

Version

Onnx name: Loop

This version of the operator has been available since version 16.

Summary

Generic Looping construct. This loop has multiple termination conditions:

  1. Trip count. Iteration count specified at runtime. Set by specifying the input M. Optional. Set to empty string to omit. Note that a static trip count (specified at graph construction time) can be specified by passing in a constant node for input M.
  2. Loop termination condition. This is an input to the op that determines whether to run the first iteration and also a loop-carried dependency for the body graph. The body graph must yield a value for the condition variable, whether this input is provided or not.

This table summarizes the operating modes of this operator with equivalent C-style code:

Operator inputs defined as (max_trip_count, condition_var).

    input ("", ""):
        for (int i=0; ; ++i) {
            cond = ...; // Note this value is ignored, but is required in the body
        }

    input ("", cond) // Note this is analogous to a while loop
        bool cond = ...;
        for (int i=0; cond; ++i) {
            cond = ...;
        }

    input ("", 1) // Note this is analogous to a do-while loop
        bool cond = true;
        for (int i=0; cond; ++i) {
            cond = ...;
        }

    input (trip_count, "") // Note this is analogous to a for loop
        int trip_count = ...;
        for (int i=0; i < trip_count; ++i) {
            cond = ...; // ignored
        }

    input (trip_count, cond)
        int trip_count = ...;
        bool cond = ...;
        for (int i=0; i < trip_count && cond; ++i) {
            cond = ...;
        }

Sample usage - cond as well as trip count

    graph predict-net {
        %a = Constant[value = <Scalar Tensor [3]>]()
        %b = Constant[value = <Scalar Tensor [6]>]()
        %keepgoing = Constant[value = <Scalar Tensor [1]>]()
        %max_trip_count = Constant[value = <Scalar Tensor [10]>]()
        %keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b)
        return
    }

    graph body-net (
        %i[INT32, scalar]           // iteration number
        %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used
        %b_in[INT32, scalar]        // incoming value of loop-carried-dependency b
    ) {
        %my_local = Add(%a, %b_in)
        %b_out = Sub(%a, %b_in)                     // outgoing value of loop-carried-dependency b
        %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition
        %user_defined_val = Add(%b_in, %b_in)       // scan-output value to be accumulated
        return %keepgoing_out, %b_out, %user_defined_val
    }

Sample equivalent C code

    {
        /* User-defined code (enclosing scope) */
        int a = 3, b = 6;
        bool keepgoing = true; // Analogous to input cond
        /* End user-defined code */

        /* Implicitly-defined code */
        const int max_trip_count = 10; // Analogous to input M
        int user_defined_vals[]; // Imagine this is resizable
        /* End implicitly-defined code */
        /* Initialize loop-carried variables and scan-output variables */
        bool keepgoing_out = keepgoing;
        int b_out = b;

        for (int i=0; i < max_trip_count && keepgoing_out; ++i) {
            /* Implicitly-defined code: bind actual parameter values
               to formal parameter variables of loop-body */
            bool keepgoing_in = keepgoing_out;
            int b_in = b_out;

            /* User-defined code (loop body) */
            int my_local = a + b_in; // Reading value "a" from the enclosing scope is fine
            b_out = a - b_in;
            keepgoing_out = my_local > b_out;
            user_defined_val = b_in + b_in; // b_in and b_out are different variables
            /* End user-defined code */

            /* Implicitly-defined code */
            user_defined_vals[i] = user_defined_val; // accumulate scan-output values
        }
        // int t = my_local; // Can't do this. my_local is not accessible here.

        // The values below are bound to the output variables of the loop and therefore accessible:
        // b_out; user_defined_vals; keepgoing_out;
    }

There are several things of note in this code snippet:

  1. Values from the enclosing scope (i.e. variable "a" here) are in scope and can be referenced in the inputs of the loop.
  2. Any values computed in the loop body that need to be used in a subsequent iteration or after the loop are modelled using a pair of variables in the loop-body, consisting of an input variable (e.g., b_in) and an output variable (e.g., b_out). These are referred to as loop-carried dependences. The loop operation node supplies the input value of the input variable for the first iteration, and returns the output value of the output variable produced by the final iteration.
  3. Scan_output variables are used to implicitly concatenate values computed across all the iterations. In the above example, the values of user_defined_val computed over all iterations are concatenated and returned as the value of user_defined_vals after the loop.
  4. Values created in the body cannot be accessed in the enclosing scope, except using the mechanism described above.

Note that the semantics of this op support "diagonal" or "wavefront" execution. (See Step 3 here for an example: https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). Frontends should emit multi-layer RNNs as a series of While operators (with time being the inner looping dimension), with each successive layer consuming the scan_outputs from the previous layer, possibly going through several point-wise operators (e.g. dropout, residual connections, linear layer).

The inputs/outputs of the subgraph (produced by the loop node) are matched based on order instead of name. The implementation will figure out the names based on this order.

Inputs

Between 2 and 2147483647 inputs.

  • M (optional, heterogeneous) I: A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip.
  • cond (optional, heterogeneous) B: A boolean termination condition. Optional. Pass empty string to skip.
  • v_initial (variadic) V: The initial values of any loop-carried dependencies (values that change across loop iterations)

Outputs

Between 1 and 2147483647 outputs.

  • v_final_and_scan_outputs (variadic) V: Final N loop carried dependency values then K scan_outputs. Scan outputs must be Tensors.

Type Constraints

  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(bfloat16)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)), optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(bfloat16))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(bfloat16)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types
  • I tensor(int64): tensor of int64, which should be a scalar.
  • B tensor(bool): tensor of bool, which should be a scalar.

OnnxLpNormalization#

class skl2onnx.algebra.onnx_ops.OnnxLpNormalization(*args, **kwargs)#

Version

Onnx name: LpNormalization

This version of the operator has been available since version 1.

Summary

Given a matrix, apply Lp-normalization along the provided axis.

Attributes

  • axis: The axis on which to apply normalization; -1 means the last axis. Default value is name: "axis" i: -1 type: INT
  • p: The order of the normalization; only 1 or 2 are supported. Default value is name: "p" i: 2 type: INT

Inputs

  • input (heterogeneous) T: Input matrix

Outputs

  • output (heterogeneous) T: Matrix after normalization

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
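A numpy sketch of the same normalization; the small epsilon guard against a zero norm is an assumption of the sketch, not part of the operator definition above:

    import numpy as np

    def lp_normalize(x, axis=-1, p=2, eps=1e-12):
        # divide each slice along `axis` by its Lp norm
        norm = np.sum(np.abs(x) ** p, axis=axis, keepdims=True) ** (1.0 / p)
        return x / np.maximum(norm, eps)

    x = np.array([[3.0, 4.0], [1.0, 1.0]], dtype=np.float32)
    print(lp_normalize(x, axis=1, p=2))   # first row becomes [0.6, 0.8]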

OnnxLpNormalization_1#

class skl2onnx.algebra.onnx_ops.OnnxLpNormalization_1(*args, **kwargs)#

Version

Onnx name: LpNormalization

This version of the operator has been available since version 1.

Summary

Given a matrix, apply Lp-normalization along the provided axis.

Attributes

  • axis: The axis on which to apply normalization; -1 means the last axis. Default value is name: "axis" i: -1 type: INT
  • p: The order of the normalization; only 1 or 2 are supported. Default value is name: "p" i: 2 type: INT

Inputs

  • input (heterogeneous) T: Input matrix

Outputs

  • output (heterogeneous) T: Matrix after normalization

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLpPool#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxLpPool(*args, **kwargs)#
-

Version

-

Onnx name: LpPool

-

This version of the operator has been available since -version 11.

-

Summary

-

LpPool consumes an input tensor X and applies Lp pooling across -the tensor according to kernel sizes, stride sizes, and pad lengths. -Lp pooling consisting of computing the Lp norm on all values of a subset -of the input tensor according to the kernel size and downsampling the -data into the output tensor Y for further processing.

-

Attributes

  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • p: p value of the Lp norm used to pool over the input data. Default value is name: "p" i: 2 type: INT

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
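
As an illustration, a minimal sketch of building and running an LpPool node with the skl2onnx algebra API; the kernel size, strides, input name and opset chosen here are assumptions made for the example only:

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxLpPool

    # One 1-channel 4x4 image, pooled with a 2x2 kernel and stride 2 (L2 norm).
    X = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))
    node = OnnxLpPool("X", kernel_shape=[2, 2], strides=[2, 2], p=2,
                      op_version=11, output_names=["Y"])
    onx = node.to_onnx({"X": X})

    sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
    print(sess.run(None, {"X": X})[0].shape)  # expected (1, 1, 2, 2)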

OnnxLpPool_1#

class skl2onnx.algebra.onnx_ops.OnnxLpPool_1(*args, **kwargs)#

Version

Onnx name: LpPool

This version of the operator has been available since version 1.

Summary

LpPool consumes an input tensor X and applies Lp pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Lp pooling consists of computing the Lp norm on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing.

Attributes

  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. DEPRECATION NOTE: auto_pad is only intended to support legacy uses, and for framework authors, one is explicitly encouraged to use explicit padding specified in the pads attribute. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • p: p value of the Lp norm used to pool over the input data, default is 2.0. Default value is name: "p" f: 2.0 type: FLOAT

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimension are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLpPool_11#

class skl2onnx.algebra.onnx_ops.OnnxLpPool_11(*args, **kwargs)#

Version

Onnx name: LpPool

This version of the operator has been available since version 11.

Summary

LpPool consumes an input tensor X and applies Lp pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Lp pooling consists of computing the Lp norm on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing.

Attributes

  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • p: p value of the Lp norm used to pool over the input data. Default value is name: "p" i: 2 type: INT

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLpPool_2#

class skl2onnx.algebra.onnx_ops.OnnxLpPool_2(*args, **kwargs)#

Version

Onnx name: LpPool

This version of the operator has been available since version 2.

Summary

LpPool consumes an input tensor X and applies Lp pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Lp pooling consists of computing the Lp norm on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing.

Attributes

  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • p: p value of the Lp norm used to pool over the input data. Default value is name: "p" i: 2 type: INT

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxMatMul#

class skl2onnx.algebra.onnx_ops.OnnxMatMul(*args, **kwargs)#

Version

Onnx name: MatMul

This version of the operator has been available since version 13.

Summary

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html

Inputs

  • A (heterogeneous)T: N-dimensional matrix A

  • B (heterogeneous)T: N-dimensional matrix B

Outputs

  • Y (heterogeneous)T: Matrix multiply results from A * B

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(bfloat16): Constrain input and output types to float/int tensors.
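
A minimal, illustrative sketch of this operator through the skl2onnx algebra API, checked against numpy; the input name "X", the random sample and the opset are assumptions for the example:

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxMatMul

    X = np.random.randn(5, 3).astype(np.float32)
    W = np.random.randn(3, 2).astype(np.float32)

    # W is stored as an initializer inside the graph; only X remains a runtime input.
    node = OnnxMatMul("X", W, op_version=13, output_names=["Y"])
    onx = node.to_onnx({"X": X})

    sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
    got = sess.run(None, {"X": X})[0]
    assert np.allclose(got, X @ W, atol=1e-5)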

OnnxMatMulInteger#

class skl2onnx.algebra.onnx_ops.OnnxMatMulInteger(*args, **kwargs)#

Version

Onnx name: MatMulInteger

This version of the operator has been available since version 10.

Summary

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. The product MUST never overflow. The accumulation may overflow if and only if in 32 bits.

Inputs

Between 2 and 4 inputs.

  • A (heterogeneous)T1: N-dimensional matrix A

  • B (heterogeneous)T2: N-dimensional matrix B

  • a_zero_point (optional, heterogeneous)T1: Zero point tensor for input ‘A’. It’s optional and default value is 0. It could be a scalar or N-D tensor. Scalar refers to per tensor quantization whereas N-D refers to per row quantization. If the input is 2D of shape [M, K] then zero point tensor may be an M element vector [zp_1, zp_2, …, zp_M]. If the input is N-D tensor with shape [D1, D2, M, K] then zero point tensor may have shape [D1, D2, M, 1].

  • b_zero_point (optional, heterogeneous)T2: Zero point tensor for input ‘B’. It’s optional and default value is 0. It could be a scalar or a N-D tensor, Scalar refers to per tensor quantization whereas N-D refers to per col quantization. If the input is 2D of shape [K, N] then zero point tensor may be an N element vector [zp_1, zp_2, …, zp_N]. If the input is N-D tensor with shape [D1, D2, K, N] then zero point tensor may have shape [D1, D2, 1, N].

Outputs

  • Y (heterogeneous)T3: Matrix multiply results from A * B

Type Constraints

  • T1 tensor(int8), tensor(uint8): Constrain input A data type to 8-bit integer tensor.

  • T2 tensor(int8), tensor(uint8): Constrain input B data type to 8-bit integer tensor.

  • T3 tensor(int32): Constrain output Y data type as 32-bit integer tensor.
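
A small sketch of the quantized matrix product through the skl2onnx algebra API; the zero points are left at their default of 0, and the input names, sample values and opset are assumptions for the example:

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxMatMulInteger

    A = np.array([[1, 2], [3, 4]], dtype=np.uint8)
    B = np.array([[5, 6], [7, 8]], dtype=np.uint8)

    # Without zero points this reduces to an integer matrix product with an int32 result.
    node = OnnxMatMulInteger("A", "B", op_version=10, output_names=["Y"])
    onx = node.to_onnx({"A": A, "B": B})

    sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
    got = sess.run(None, {"A": A, "B": B})[0]
    assert got.dtype == np.int32
    assert np.array_equal(got, A.astype(np.int32) @ B.astype(np.int32))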

OnnxMatMulInteger_10#

class skl2onnx.algebra.onnx_ops.OnnxMatMulInteger_10(*args, **kwargs)#

Version

Onnx name: MatMulInteger

This version of the operator has been available since version 10.

Summary

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. The product MUST never overflow. The accumulation may overflow if and only if in 32 bits.

Inputs

Between 2 and 4 inputs.

  • A (heterogeneous)T1: N-dimensional matrix A

  • -
  • B (heterogeneous)T2: N-dimensional matrix B

  • -
  • a_zero_point (optional, heterogeneous)T1: Zero point tensor for input ‘A’. It’s optional and default value is 0. It could be a scalar or N-D tensor. Scalar refers to per tensor quantization whereas N-D refers to per row quantization. If the input is 2D of shape [M, K] then zero point tensor may be an M element vector [zp_1, zp_2, …, zp_M]. If the input is N-D tensor with shape [D1, D2, M, K] then zero point tensor may have shape [D1, D2, M, 1].

  • -
  • b_zero_point (optional, heterogeneous)T2: Zero point tensor for input ‘B’. It’s optional and default value is 0. It could be a scalar or a N-D tensor, Scalar refers to per tensor quantization whereas N-D refers to per col quantization. If the input is 2D of shape [K, N] then zero point tensor may be an N element vector [zp_1, zp_2, …, zp_N]. If the input is N-D tensor with shape [D1, D2, K, N] then zero point tensor may have shape [D1, D2, 1, N].

Outputs

  • Y (heterogeneous)T3: Matrix multiply results from A * B

Type Constraints

  • T1 tensor(int8), tensor(uint8): Constrain input A data type to 8-bit integer tensor.

  • -
  • T2 tensor(int8), tensor(uint8): Constrain input B data type to 8-bit integer tensor.

  • -
  • T3 tensor(int32): Constrain output Y data type as 32-bit integer tensor.

OnnxMatMul_1#

class skl2onnx.algebra.onnx_ops.OnnxMatMul_1(*args, **kwargs)#

Version

Onnx name: MatMul

This version of the operator has been available since version 1.

Summary

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html

Inputs

  • A (heterogeneous)T: N-dimensional matrix A

  • B (heterogeneous)T: N-dimensional matrix B

Outputs

  • Y (heterogeneous)T: Matrix multiply results from A * B

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxMatMul_13#

class skl2onnx.algebra.onnx_ops.OnnxMatMul_13(*args, **kwargs)#

Version

Onnx name: MatMul

This version of the operator has been available since version 13.

Summary

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html

Inputs

  • A (heterogeneous)T: N-dimensional matrix A

  • B (heterogeneous)T: N-dimensional matrix B

Outputs

  • Y (heterogeneous)T: Matrix multiply results from A * B

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(bfloat16): Constrain input and output types to float/int tensors.

OnnxMatMul_9#

class skl2onnx.algebra.onnx_ops.OnnxMatMul_9(*args, **kwargs)#

Version

Onnx name: MatMul

This version of the operator has been available since version 9.

Summary

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html

Inputs

  • A (heterogeneous)T: N-dimensional matrix A

  • B (heterogeneous)T: N-dimensional matrix B

Outputs

  • Y (heterogeneous)T: Matrix multiply results from A * B

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64): Constrain input and output types to float/int tensors.

OnnxMax#

class skl2onnx.algebra.onnx_ops.OnnxMax(*args, **kwargs)#

Version

Onnx name: Max

This version of the operator has been available since version 13.

Summary

Element-wise max of each of the input tensors (with Numpy-style broadcasting support). All inputs and outputs must have the same data type. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

Between 1 and 2147483647 inputs.

  • data_0 (variadic, heterogeneous)T: List of tensors for max.

Outputs

  • max (heterogeneous)T: Output tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to numeric tensors.
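
A minimal sketch of broadcasting Max through the skl2onnx algebra API; taking the maximum against a constant zero is simply a convenient, verifiable example and not prescribed by the operator:

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxMax

    X = np.array([[-1.5, 0.5], [2.0, -3.0]], dtype=np.float32)

    # Element-wise max against a broadcast scalar, i.e. a ReLU written with Max.
    node = OnnxMax("X", np.array([0.0], dtype=np.float32),
                   op_version=13, output_names=["Y"])
    onx = node.to_onnx({"X": X})

    sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
    got = sess.run(None, {"X": X})[0]
    assert np.array_equal(got, np.maximum(X, 0.0))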

OnnxMaxPool#

class skl2onnx.algebra.onnx_ops.OnnxMaxPool(*args, **kwargs)#

Version

Onnx name: MaxPool

This version of the operator has been available since version 12.

Summary

MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of computing the max on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape will be the following:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

or, if ceil_mode is enabled,

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.

auto_pad is a DEPRECATED attribute. If it is still used, the output spatial shape will be the following:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

And the pad shape will be the following if SAME_UPPER or SAME_LOWER:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]

The output of each pooling window is the maximum value of the elements, excluding padding.

Attributes

  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is name: "ceil_mode" i: 0 type: INT

  • storage_order: The storage order of the tensor. 0 is row major, and 1 is column major. Default value is name: "storage_order" i: 0 type: INT

Inputs

  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

Outputs

Between 1 and 2 outputs.

  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • Indices (optional, heterogeneous)I: Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x … x Dn).

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(uint8): Constrain input and output types to float and 8 bit tensors.

  • I tensor(int64): Constrain index tensor to int64
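
The following sketch applies the output-shape formula above to a concrete case (4x4 input, 2x2 kernel, stride 2, no padding gives floor((4 - 2) / 2 + 1) = 2) using the skl2onnx algebra API; the input name, sample values and opset are illustrative assumptions:

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxMaxPool

    X = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))

    # 2x2 window, stride 2, no padding.
    node = OnnxMaxPool("X", kernel_shape=[2, 2], strides=[2, 2],
                       op_version=12, output_names=["Y"])
    onx = node.to_onnx({"X": X})

    sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
    Y = sess.run(None, {"X": X})[0]
    assert Y.shape == (1, 1, 2, 2)
    assert np.array_equal(Y[0, 0], np.array([[5.0, 7.0], [13.0, 15.0]]))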

OnnxMaxPool_1#

class skl2onnx.algebra.onnx_ops.OnnxMaxPool_1(*args, **kwargs)#

Version

Onnx name: MaxPool

This version of the operator has been available since version 1.

Summary

MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of computing the max on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape will be the following:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.

auto_pad is a DEPRECATED attribute. If it is still used, the output spatial shape will be the following:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

And the pad shape will be the following if SAME_UPPER or SAME_LOWER:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]

The output of each pooling window is the maximum value of the elements, excluding padding.

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxMaxPool_10#

class skl2onnx.algebra.onnx_ops.OnnxMaxPool_10(*args, **kwargs)#

Version

Onnx name: MaxPool

This version of the operator has been available since version 10.

Summary

MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of computing the max on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape will be the following:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

or, if ceil_mode is enabled,

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.

auto_pad is a DEPRECATED attribute. If it is still used, the output spatial shape will be the following:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

And the pad shape will be the following if SAME_UPPER or SAME_LOWER:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]

The output of each pooling window is the maximum value of the elements, excluding padding.

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is -name: "ceil_mode" i: 0 type: INT

  • -
  • -
  • -
  • -
  • storage_order: The storage order of the tensor. 0 is row major, and 1 is column major. Default value is -name: "storage_order" i: 0 type: INT

  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-

Between 1 and 2 outputs.

-
    -
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • -
  • Indices (optional, heterogeneous)I: Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x … x Dn).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • I tensor(int64): Constrain index tensor to int64

OnnxMaxPool_11#

class skl2onnx.algebra.onnx_ops.OnnxMaxPool_11(*args, **kwargs)#

Version

Onnx name: MaxPool

This version of the operator has been available since version 11.

Summary

MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of computing the max on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape will be the following:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

or, if ceil_mode is enabled,

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.

auto_pad is a DEPRECATED attribute. If it is still used, the output spatial shape will be the following:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

And the pad shape will be the following if SAME_UPPER or SAME_LOWER:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]

The output of each pooling window is the maximum value of the elements, excluding padding.

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is -name: "ceil_mode" i: 0 type: INT

  • -
  • -
  • -
  • -
  • storage_order: The storage order of the tensor. 0 is row major, and 1 is column major. Default value is -name: "storage_order" i: 0 type: INT

  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-

Between 1 and 2 outputs.

-
    -
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • -
  • Indices (optional, heterogeneous)I: Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x … x Dn).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • I tensor(int64): Constrain index tensor to int64

OnnxMaxPool_12#

class skl2onnx.algebra.onnx_ops.OnnxMaxPool_12(*args, **kwargs)#

Version

Onnx name: MaxPool

This version of the operator has been available since version 12.

Summary

MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of computing the max on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape will be the following:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

or, if ceil_mode is enabled,

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.

auto_pad is a DEPRECATED attribute. If it is still used, the output spatial shape will be the following:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

And the pad shape will be the following if SAME_UPPER or SAME_LOWER:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]

The output of each pooling window is the maximum value of the elements, excluding padding.

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is -name: "ceil_mode" i: 0 type: INT

  • -
  • -
  • -
  • -
  • storage_order: The storage order of the tensor. 0 is row major, and 1 is column major. Default value is -name: "storage_order" i: 0 type: INT

  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-

Between 1 and 2 outputs.

-
    -
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • -
  • Indices (optional, heterogeneous)I: Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x … x Dn).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(uint8): Constrain input and output types to float and 8 bit tensors.

  • -
  • I tensor(int64): Constrain index tensor to int64

OnnxMaxPool_8#

class skl2onnx.algebra.onnx_ops.OnnxMaxPool_8(*args, **kwargs)#

Version

Onnx name: MaxPool

This version of the operator has been available since version 8.

Summary

MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of computing the max on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape will be the following:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.

auto_pad is a DEPRECATED attribute. If it is still used, the output spatial shape will be the following:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

And the pad shape will be the following if SAME_UPPER or SAME_LOWER:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]

The output of each pooling window is the maximum value of the elements, excluding padding.

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • -
  • -
  • storage_order: The storage order of the tensor. 0 is row major, and 1 is column major. Default value is -name: "storage_order" i: 0 type: INT

  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
-

Outputs

-

Between 1 and 2 outputs.

-
    -
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • -
  • Indices (optional, heterogeneous)I: Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x … x Dn).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • I tensor(int64): Constrain index tensor to int64

OnnxMaxRoiPool#

class skl2onnx.algebra.onnx_ops.OnnxMaxRoiPool(*args, **kwargs)#

Version

Onnx name: MaxRoiPool

This version of the operator has been available since version 1.

Summary

ROI max pool consumes an input tensor X and region of interests (RoIs) to apply max pooling across each RoI, to produce output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1]).

Attributes

  • spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates from their input scale to the scale used when pooling. Default value is name: "spatial_scale" f: 1.0 type: FLOAT

Inputs

  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.

  • rois (heterogeneous)T: RoIs (Regions of Interest) to pool over. Should be a 2-D tensor of shape (num_rois, 5) given as [[batch_id, x1, y1, x2, y2], …].

Outputs

  • Y (heterogeneous)T: RoI pooled output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1]).

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
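
A hedged sketch of how a single RoI can be pooled with this operator through the skl2onnx algebra API; the RoI coordinates, the pooled_shape value, the input names and the assumption that the runtime supports this legacy operator are all illustrative:

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxMaxRoiPool

    X = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))
    # One RoI covering the whole image of batch element 0: [batch_id, x1, y1, x2, y2].
    rois = np.array([[0, 0, 0, 3, 3]], dtype=np.float32)

    node = OnnxMaxRoiPool("X", "rois", pooled_shape=[2, 2],
                          op_version=1, output_names=["Y"])
    onx = node.to_onnx({"X": X, "rois": rois})

    sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
    print(sess.run(None, {"X": X, "rois": rois})[0].shape)  # expected (1, 1, 2, 2)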

OnnxMaxRoiPool_1#

class skl2onnx.algebra.onnx_ops.OnnxMaxRoiPool_1(*args, **kwargs)#

Version

Onnx name: MaxRoiPool

This version of the operator has been available since version 1.

Summary

ROI max pool consumes an input tensor X and region of interests (RoIs) to apply max pooling across each RoI, to produce output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1]).

Attributes

  • spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates from their input scale to the scale used when pooling. Default value is name: "spatial_scale" f: 1.0 type: FLOAT

Inputs

-
    -
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.

  • -
  • rois (heterogeneous)T: RoIs (Regions of Interest) to pool over. Should be a 2-D tensor of shape (num_rois, 5) given as [[batch_id, x1, y1, x2, y2], …].

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: RoI pooled output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1]).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxMaxUnpool#

class skl2onnx.algebra.onnx_ops.OnnxMaxUnpool(*args, **kwargs)#

Version

Onnx name: MaxUnpool

This version of the operator has been available since version 11.

Summary

MaxUnpool essentially computes the partial inverse of the MaxPool op. The input information to this op is typically the output information from a MaxPool op. The first input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corresponding to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. The third (optional) input is a tensor that specifies the output size of the unpooling operation.

MaxUnpool is intended to do a ‘partial’ inverse of the MaxPool op. ‘Partial’ because all the non-maximal values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling the result of an unpooling operation should give back the original input to the unpooling op.

MaxUnpool can produce the same output size for several input sizes, which makes the unpooling op ambiguous. The third input argument, output_size, is meant to disambiguate the op and produce an output tensor of known/predictable size.

In addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads, which define the exact unpooling op. The attributes typically have the same values as the corresponding pooling op that the unpooling op is trying to invert.

Attributes


Inputs

-

Between 2 and 3 inputs.

  • X (heterogeneous)T1: Input data tensor that has to be unpooled. This tensor is typically the first output of the MaxPool op.Dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non-image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • I (heterogeneous)T2: Input data tensor containing the indices corresponding to elements in the first input tensor X.This tensor is typically the second output of the MaxPool op.Dimensions must be the same as input tensor X. The indices are linear, i.e. computed considering the tensor as flattened 1-D tensor, assuming row-major storage. Also, the linear indices should not consider padding. So the values in indices are in the range [0, N x C x D1 x … x Dn).

  • output_shape (optional, heterogeneous)T2: The shape of the output can be explicitly set which will cause pads values to be auto generated. If ‘output_shape’ is specified, ‘pads’ values are ignored.

Outputs

  • output (heterogeneous)T1: Output data tensor that contains the result of the unpooling.

Type Constraints

  • T1 tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • T2 tensor(int64): Constrain index tensor to int64
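
The sketch below chains MaxPool and MaxUnpool through the skl2onnx algebra API to illustrate the "partial inverse" behaviour described above. It assumes that passing two output_names to OnnxMaxPool exposes both the pooled tensor and the indices, and runs the two small models separately with onnxruntime; all names, shapes and opsets are illustrative:

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxMaxPool, OnnxMaxUnpool

    X = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))

    # First run a MaxPool that also returns the flattened indices of the maxima.
    pool = OnnxMaxPool("X", kernel_shape=[2, 2], strides=[2, 2],
                       op_version=12, output_names=["P", "I"])
    pool_model = pool.to_onnx({"X": X})
    sess = InferenceSession(pool_model.SerializeToString(), providers=["CPUExecutionProvider"])
    P, I = sess.run(None, {"X": X})

    # Then invert it: non-maximal positions come back as zeros.
    unpool = OnnxMaxUnpool("P", "I", kernel_shape=[2, 2], strides=[2, 2],
                           op_version=11, output_names=["Y"])
    unpool_model = unpool.to_onnx({"P": P, "I": I})
    sess2 = InferenceSession(unpool_model.SerializeToString(), providers=["CPUExecutionProvider"])
    Y = sess2.run(None, {"P": P, "I": I})[0]
    assert Y.shape == X.shape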

OnnxMaxUnpool_11#

class skl2onnx.algebra.onnx_ops.OnnxMaxUnpool_11(*args, **kwargs)#

Version

Onnx name: MaxUnpool

This version of the operator has been available since version 11.

Summary

MaxUnpool essentially computes the partial inverse of the MaxPool op. The input information to this op is typically the output information from a MaxPool op. The first input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corresponding to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. The third (optional) input is a tensor that specifies the output size of the unpooling operation.

MaxUnpool is intended to do a ‘partial’ inverse of the MaxPool op. ‘Partial’ because all the non-maximal values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling the result of an unpooling operation should give back the original input to the unpooling op.

MaxUnpool can produce the same output size for several input sizes, which makes the unpooling op ambiguous. The third input argument, output_size, is meant to disambiguate the op and produce an output tensor of known/predictable size.

In addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads, which define the exact unpooling op. The attributes typically have the same values as the corresponding pooling op that the unpooling op is trying to invert.

Attributes


Inputs

-

Between 2 and 3 inputs.

-
    -
  • X (heterogeneous)T1: Input data tensor that has to be unpooled. This tensor is typically the first output of the MaxPool op.Dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non-image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
  • I (heterogeneous)T2: Input data tensor containing the indices corresponding to elements in the first input tensor X.This tensor is typically the second output of the MaxPool op.Dimensions must be the same as input tensor X. The indices are linear, i.e. computed considering the tensor as flattened 1-D tensor, assuming row-major storage. Also, the linear indices should not consider padding. So the values in indices are in the range [0, N x C x D1 x … x Dn).

  • -
  • output_shape (optional, heterogeneous)T2: The shape of the output can be explicitly set which will cause pads values to be auto generated. If ‘output_shape’ is specified, ‘pads’ values are ignored.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T1: Output data tensor that contains the result of the unpooling.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T2 tensor(int64): Constrain index tensor to int64

OnnxMaxUnpool_9#

class skl2onnx.algebra.onnx_ops.OnnxMaxUnpool_9(*args, **kwargs)#

Version

Onnx name: MaxUnpool

This version of the operator has been available since version 9.

Summary

MaxUnpool essentially computes the partial inverse of the MaxPool op. The input information to this op is typically the output information from a MaxPool op. The first input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corresponding to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. The third (optional) input is a tensor that specifies the output size of the unpooling operation.

MaxUnpool is intended to do a ‘partial’ inverse of the MaxPool op. ‘Partial’ because all the non-maximal values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling the result of an unpooling operation should give back the original input to the unpooling op.

MaxUnpool can produce the same output size for several input sizes, which makes the unpooling op ambiguous. The third input argument, output_size, is meant to disambiguate the op and produce an output tensor of known/predictable size.

In addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads, which define the exact unpooling op. The attributes typically have the same values as the corresponding pooling op that the unpooling op is trying to invert.

Attributes


Inputs

-

Between 2 and 3 inputs.

-
    -
  • X (heterogeneous)T1: Input data tensor that has to be unpooled. This tensor is typically the first output of the MaxPool op.Dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non-image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
  • I (heterogeneous)T2: Input data tensor containing the indices corresponding to elements in the first input tensor X.This tensor is typically the second output of the MaxPool op.Dimensions must be the same as input tensor X. The indices are linear, i.e. computed considering the tensor as flattened 1-D tensor, assuming row-major storage. Also, the linear indices should not consider padding. So the values in indices are in the range [0, N x C x D1 x … x Dn).

  • -
  • output_shape (optional, heterogeneous)T2: The shape of the output can be explicitly set which will cause pads values to be auto generated. If ‘output_shape’ is specified, ‘pads’ values are ignored.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T1: Output data tensor that contains the result of the unpooling.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T2 tensor(int64): Constrain index tensor to int64

OnnxMax_1#

class skl2onnx.algebra.onnx_ops.OnnxMax_1(*args, **kwargs)#

Version

Onnx name: Max

This version of the operator has been available since version 1.

Summary

Element-wise max of each of the input tensors. All inputs and outputs must have the same shape and data type.

Attributes

Inputs

Between 1 and 2147483647 inputs.

  • data_0 (variadic, heterogeneous)T: List of tensors for Max.

Outputs

  • max (heterogeneous)T: Output tensor. Same dimension as inputs.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxMax_12#

class skl2onnx.algebra.onnx_ops.OnnxMax_12(*args, **kwargs)#

Version

Onnx name: Max

This version of the operator has been available since version 12.

Summary

Element-wise max of each of the input tensors (with Numpy-style broadcasting support). All inputs and outputs must have the same data type. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

Between 1 and 2147483647 inputs.

  • data_0 (variadic, heterogeneous)T: List of tensors for max.

Outputs

  • max (heterogeneous)T: Output tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to numeric tensors.

OnnxMax_13#

class skl2onnx.algebra.onnx_ops.OnnxMax_13(*args, **kwargs)#

Version

Onnx name: Max

This version of the operator has been available since version 13.

Summary

Element-wise max of each of the input tensors (with Numpy-style broadcasting support). All inputs and outputs must have the same data type. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

Between 1 and 2147483647 inputs.

  • data_0 (variadic, heterogeneous)T: List of tensors for max.

Outputs

  • max (heterogeneous)T: Output tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to numeric tensors.

OnnxMax_6#

class skl2onnx.algebra.onnx_ops.OnnxMax_6(*args, **kwargs)#

Version

Onnx name: Max

This version of the operator has been available since version 6.

Summary

Element-wise max of each of the input tensors. All inputs and outputs must have the same shape and data type.

Inputs

Between 1 and 2147483647 inputs.

  • data_0 (variadic, heterogeneous)T: List of tensors for Max.

Outputs

  • max (heterogeneous)T: Output tensor. Same dimension as inputs.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxMax_8#

class skl2onnx.algebra.onnx_ops.OnnxMax_8(*args, **kwargs)#

Version

Onnx name: Max

This version of the operator has been available since version 8.

Summary

Element-wise max of each of the input tensors (with Numpy-style broadcasting support). All inputs and outputs must have the same data type. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

Between 1 and 2147483647 inputs.

  • data_0 (variadic, heterogeneous)T: List of tensors for max.

Outputs

  • max (heterogeneous)T: Output tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxMean#

class skl2onnx.algebra.onnx_ops.OnnxMean(*args, **kwargs)#

Version

Onnx name: Mean

This version of the operator has been available since version 13.

Summary

Element-wise mean of each of the input tensors (with Numpy-style broadcasting support). All inputs and outputs must have the same data type. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

Between 1 and 2147483647 inputs.

  • data_0 (variadic, heterogeneous)T: List of tensors for mean.

Outputs

  • mean (heterogeneous)T: Output tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.
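
A short, illustrative sketch of the element-wise mean through the skl2onnx algebra API, checked against numpy; input names, sample values and opset are assumptions for the example:

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxMean

    A = np.array([[1.0, 2.0]], dtype=np.float32)
    B = np.array([[3.0, 6.0]], dtype=np.float32)

    node = OnnxMean("A", "B", op_version=13, output_names=["M"])
    onx = node.to_onnx({"A": A, "B": B})

    sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
    got = sess.run(None, {"A": A, "B": B})[0]
    assert np.allclose(got, (A + B) / 2.0)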

OnnxMeanVarianceNormalization#

class skl2onnx.algebra.onnx_ops.OnnxMeanVarianceNormalization(*args, **kwargs)#

Version

Onnx name: MeanVarianceNormalization

This version of the operator has been available since version 13.

Summary

A MeanVarianceNormalization Function: Perform mean variance normalization on the input tensor X using formula: (X-EX)/sqrt(E(X-EX)^2)

Attributes

  • axes: A list of integers, along which to reduce. The default is to calculate along axes [0,2,3] for calculating mean and variance along each channel. Two variables with the same C-coordinate are associated with the same mean and variance. Default value is name: "axes" ints: 0 ints: 2 ints: 3 type: INTS

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.
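
The sketch below checks the formula above numerically with the default axes [0, 2, 3], using the skl2onnx algebra API; the random input, its shape and the opset are assumptions for the example:

    import numpy as np
    from onnxruntime import InferenceSession
    from skl2onnx.algebra.onnx_ops import OnnxMeanVarianceNormalization

    X = np.random.randn(2, 3, 4, 5).astype(np.float32)

    # Default axes [0, 2, 3]: every channel is normalized to zero mean and unit variance.
    node = OnnxMeanVarianceNormalization("X", op_version=13, output_names=["Y"])
    onx = node.to_onnx({"X": X})

    sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
    got = sess.run(None, {"X": X})[0]

    mean = X.mean(axis=(0, 2, 3), keepdims=True)
    expected = (X - mean) / np.sqrt(((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True))
    assert np.allclose(got, expected, atol=1e-4)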

OnnxMeanVarianceNormalization_13#

class skl2onnx.algebra.onnx_ops.OnnxMeanVarianceNormalization_13(*args, **kwargs)#

Version

Onnx name: MeanVarianceNormalization

This version of the operator has been available since version 13.

Summary

A MeanVarianceNormalization Function: Perform mean variance normalization on the input tensor X using formula: (X-EX)/sqrt(E(X-EX)^2)

Attributes

  • axes: A list of integers, along which to reduce. The default is to calculate along axes [0,2,3] for calculating mean and variance along each channel. Two variables with the same C-coordinate are associated with the same mean and variance. Default value is name: "axes" ints: 0 ints: 2 ints: 3 type: INTS

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxMeanVarianceNormalization_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMeanVarianceNormalization_9(*args, **kwargs)#
-

Version

-

Onnx name: MeanVarianceNormalization

-

This version of the operator has been available since version 9.

Summary

A MeanVarianceNormalization Function: Perform mean variance normalization on the input tensor X using the formula: (X-EX)/sqrt(E(X-EX)^2)

-

Attributes

-
    -
  • axes: A list of integers, along which to reduce. The default is to calculate along axes [0,2,3] for calculating mean and variance along each channel. Two variables with the same C-coordinate are associated with the same mean and variance. Default value is name: "axes" ints: 0 ints: 2 ints: 3 type: INTS

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxMean_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMean_1(*args, **kwargs)#
-

Version

-

Onnx name: Mean

-

This version of the operator has been available since version 1.

Summary

Element-wise mean of each of the input tensors. All inputs and outputs must have the same shape and data type.

-

Attributes

-
    -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for Mean.

  • -
-

Outputs

-
    -
  • mean (heterogeneous)T: Output tensor. Same dimension as inputs.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxMean_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMean_13(*args, **kwargs)#
-

Version

-

Onnx name: Mean

-

This version of the operator has been available since version 13.

Summary

Element-wise mean of each of the input tensors (with Numpy-style broadcasting support). All inputs and outputs must have the same data type. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for mean.

  • -
-

Outputs

-
    -
  • mean (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxMean_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMean_6(*args, **kwargs)#
-

Version

-

Onnx name: Mean

-

This version of the operator has been available since version 6.

Summary

Element-wise mean of each of the input tensors. All inputs and outputs must have the same shape and data type.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for Mean.

  • -
-

Outputs

-
    -
  • mean (heterogeneous)T: Output tensor. Same dimension as inputs.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxMean_8#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMean_8(*args, **kwargs)#
-

Version

-

Onnx name: Mean

-

This version of the operator has been available since version 8.

Summary

Element-wise mean of each of the input tensors (with Numpy-style broadcasting support). All inputs and outputs must have the same data type. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for mean.

  • -
-

Outputs

-
    -
  • mean (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxMin#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMin(*args, **kwargs)#
-

Version

-

Onnx name: Min

-

This version of the operator has been available since version 13.

Summary

Element-wise min of each of the input tensors (with Numpy-style broadcasting support). All inputs and outputs must have the same data type. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for min.

  • -
-

Outputs

-
    -
  • min (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to numeric tensors.

  • -
-
- -
-
-
-
-

OnnxMin_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMin_1(*args, **kwargs)#
-

Version

-

Onnx name: Min

-

This version of the operator has been available since version 1.

Summary

Element-wise min of each of the input tensors. All inputs and outputs must have the same shape and data type.

-

Attributes

-
    -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for Min

  • -
-

Outputs

-
    -
  • min (heterogeneous)T: Output tensor. Same dimension as inputs.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxMin_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMin_12(*args, **kwargs)#
-

Version

-

Onnx name: Min

-

This version of the operator has been available since version 12.

Summary

Element-wise min of each of the input tensors (with Numpy-style broadcasting support). All inputs and outputs must have the same data type. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for min.

  • -
-

Outputs

-
    -
  • min (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to numeric tensors.

  • -
-
- -
-
-
-
-

OnnxMin_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMin_13(*args, **kwargs)#
-

Version

-

Onnx name: Min

-

This version of the operator has been available since version 13.

Summary

Element-wise min of each of the input tensors (with Numpy-style broadcasting support). All inputs and outputs must have the same data type. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for min.

  • -
-

Outputs

-
    -
  • min (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to numeric tensors.

  • -
-
- -
-
-
-
-

OnnxMin_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMin_6(*args, **kwargs)#
-

Version

-

Onnx name: Min

-

This version of the operator has been available since version 6.

Summary

Element-wise min of each of the input tensors. All inputs and outputs must have the same shape and data type.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for Min

  • -
-

Outputs

-
    -
  • min (heterogeneous)T: Output tensor. Same dimension as inputs.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxMin_8#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMin_8(*args, **kwargs)#
-

Version

-

Onnx name: Min

-

This version of the operator has been available since version 8.

Summary

Element-wise min of each of the input tensors (with Numpy-style broadcasting support). All inputs and outputs must have the same data type. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for min.

  • -
-

Outputs

-
    -
  • min (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxMod#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMod(*args, **kwargs)#
-

Version

-

Onnx name: Mod

-

This version of the operator has been available since version 13.

Summary

Performs element-wise binary modulus (with Numpy-style broadcasting support). The sign of the remainder is the same as that of the Divisor.

Mod operator can also behave like C fmod() or numpy.fmod. In this case, however, the sign of the remainder will be the same as the Dividend (in contrast to integer mod). To force a behavior like numpy.fmod(), an ‘fmod’ attribute is provided. This attribute is set to 0 by default, causing the behavior to be like integer mod. Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod().

If the input type is floating point, then the fmod attribute must be set to 1.

In case of dividend being zero, the results will be platform dependent.

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Attributes

-
    -
  • fmod: Whether the operator should behave like fmod (default=0 meaning it will do integer mods); Set this to 1 to force fmod treatment Default value is -name: "fmod" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: Dividend tensor

  • -
  • B (heterogeneous)T: Divisor tensor

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Remainder tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-
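The difference between the default integer mod (fmod=0) and fmod=1 corresponds to numpy.mod versus numpy.fmod, as this small numpy comparison illustrates:

  import numpy as np

  a = np.array([-4, 7, 5], dtype=np.int64)
  b = np.array([3, -3, 8], dtype=np.int64)

  # fmod=0 (default): integer mod, the remainder takes the sign of the divisor.
  print(np.mod(a, b))    # [ 2 -2  5]
  # fmod=1: C fmod() / numpy.fmod behaviour, the remainder takes the sign of the dividend.
  print(np.fmod(a, b))   # [-1  1  5]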

OnnxMod_10#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMod_10(*args, **kwargs)#
-

Version

-

Onnx name: Mod

-

This version of the operator has been available since version 10.

Summary

Performs element-wise binary modulus (with Numpy-style broadcasting support). The sign of the remainder is the same as that of the Divisor.

Mod operator can also behave like C fmod() or numpy.fmod. In this case, however, the sign of the remainder will be the same as the Dividend (in contrast to integer mod). To force a behavior like numpy.fmod(), an ‘fmod’ attribute is provided. This attribute is set to 0 by default, causing the behavior to be like integer mod. Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod().

If the input type is floating point, then the fmod attribute must be set to 1.

In case of dividend being zero, the results will be platform dependent.

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Attributes

-
    -
  • fmod: Whether the operator should behave like fmod (default=0 meaning it will do integer mods); Set this to 1 to force fmod treatment Default value is -name: "fmod" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: Dividend tensor

  • -
  • B (heterogeneous)T: Divisor tensor

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Remainder tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxMod_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMod_13(*args, **kwargs)#
-

Version

-

Onnx name: Mod

-

This version of the operator has been available since version 13.

Summary

Performs element-wise binary modulus (with Numpy-style broadcasting support). The sign of the remainder is the same as that of the Divisor.

Mod operator can also behave like C fmod() or numpy.fmod. In this case, however, the sign of the remainder will be the same as the Dividend (in contrast to integer mod). To force a behavior like numpy.fmod(), an ‘fmod’ attribute is provided. This attribute is set to 0 by default, causing the behavior to be like integer mod. Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod().

If the input type is floating point, then the fmod attribute must be set to 1.

In case of dividend being zero, the results will be platform dependent.

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Attributes

-
    -
  • fmod: Whether the operator should behave like fmod (default=0 meaning it will do integer mods); Set this to 1 to force fmod treatment Default value is -name: "fmod" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: Dividend tensor

  • -
  • B (heterogeneous)T: Divisor tensor

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Remainder tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxMomentum#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMomentum(*args, **kwargs)#
-

Version

-

Onnx name: Momentum

-

This version of the operator has been available since version 1 of domain ai.onnx.preview.training.

Summary

Compute one iteration of stochastic gradient update with momentum. This operator can conduct the optimization of multiple tensor variables.

Let’s define the behavior of this operator. As you can imagine, SG with momentum requires several parameters:

  • The learning-rate “R”.

  • The update count “T”. That is, the number of conducted training iterations. It should be zero in the first training iteration.

  • A L2-norm regularization coefficient “norm_coefficient”.

  • A decay coefficient of previous accumulated gradient (i.e., momentum) “alpha”.

  • The scaling coefficient of current gradient “beta”.

  • An attribute “mode” to choose whether standard momentum or Nesterov’s momentum should be used.

For the sake of simplicity, assume that there is only one tensor (called “X”) to be optimized. Other necessary inputs are “X”’s gradient (called “G”) and “X”’s momentum (called “V”). This Momentum operator maps all these inputs to the new value of “X” (called “X_new”) and its new momentum (called “V_new”).

This operator supports two different momentum algorithms. Set the attribute “mode” to “nesterov” if Nesterov’s momentum is desired. Otherwise, set the attribute “mode” to “standard” to use standard momentum. Computation details are described subsequently.

Let “+”, “-”, “*”, and “/” be element-wise operations with numpy-style broadcasting.

Pseudo code for SG with standard momentum:

  // Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared
  // values of all elements in X.
  G_regularized = norm_coefficient * X + G

  // In the first training iteration, beta should always be 1.
  beta_adjusted = T > 0 ? beta : 1

  // Compute the current momentum based on previous momentum and the current gradient.
  V_new = alpha * V + beta_adjusted * G_regularized

  // Update X.
  X_new = X - R * V_new

Pseudo code for SG with Nesterov’s momentum:

  // Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared
  // values of all elements in X.
  G_regularized = norm_coefficient * X + G

  // In the first training iteration, beta should always be 1.
  beta_adjusted = T > 0 ? beta : 1

  // Compute the current momentum based on previous momentum and the current gradient.
  V_new = alpha * V + beta_adjusted * G_regularized

  // Compute final update direction and then update X.
  X_new = X - R * (G_regularized + alpha * V_new)

If one assigns this operator to optimize multiple inputs, for example “X_1” and “X_2”, the same pseudo code is extended to handle all tensors jointly. More specifically, we can view “X” as a concatenation of “X_1” and “X_2” (of course, their gradients and accumulated gradients should be concatenated too) and then our pseudo code becomes applicable.

-

Attributes

-
    -
  • -
  • -
  • -
  • -
-

Inputs

-

Between 3 and 2147483647 inputs.

-
    -
  • R (heterogeneous)T1: The learning rate.

  • -
  • T (heterogeneous)T2: Update count of “X”. It should be a scalar.

  • -
  • inputs (variadic)T3: It sequentially contains the current values of optimized tensors, then their gradient tensors, and finally their momentum tensors. For example, if two tensors “X_1” and “X_2” are optimized, The expected input list would be [“X_1”, “X_2”, gradient of “X_1”, gradient of “X_2”, momentum of “X_1”, momentum of “X_2”].

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic)T3: It sequentially contains the new values of optimized tensors and then the new values of their momentum tensors. For example, if two tensors “X_1” and “X_2” are optimized, the output list would be [new value of “X_1,” new value of “X_2” new momentum of “X_1”, new momentum of “X_2”].

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(double): Constrain input types to float scalars.

  • -
  • T2 tensor(int64): Constrain input types to 64-bit integer scalars.

  • -
  • T3 tensor(float), tensor(double): Constrain input types to float tensors.

  • -
-
- -
-
-
-
-
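The standard-momentum pseudo code above can be sketched as a plain numpy function (illustrative only; momentum_step is a hypothetical helper and the default alpha/beta values here are arbitrary, not the operator’s defaults):

  import numpy as np

  def momentum_step(R, T, X, G, V, norm_coefficient=0.0, alpha=0.9, beta=1.0):
      # One update with standard momentum, following the pseudo code above.
      G_regularized = norm_coefficient * X + G
      beta_adjusted = beta if T > 0 else 1.0
      V_new = alpha * V + beta_adjusted * G_regularized
      X_new = X - R * V_new
      return X_new, V_new

  X = np.array([1.0, 2.0], dtype=np.float32)
  G = np.array([0.5, -0.5], dtype=np.float32)
  V = np.zeros_like(X)
  print(momentum_step(R=0.1, T=0, X=X, G=G, V=V))   # ([0.95, 2.05], [0.5, -0.5])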

OnnxMomentum_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMomentum_1(*args, **kwargs)#
-

Version

-

Onnx name: Momentum

-

This version of the operator has been available since version 1 of domain ai.onnx.preview.training.

Summary

Compute one iteration of stochastic gradient update with momentum. This operator can conduct the optimization of multiple tensor variables.

Let’s define the behavior of this operator. As you can imagine, SG with momentum requires several parameters:

  • The learning-rate “R”.

  • The update count “T”. That is, the number of conducted training iterations. It should be zero in the first training iteration.

  • A L2-norm regularization coefficient “norm_coefficient”.

  • A decay coefficient of previous accumulated gradient (i.e., momentum) “alpha”.

  • The scaling coefficient of current gradient “beta”.

  • An attribute “mode” to choose whether standard momentum or Nesterov’s momentum should be used.

For the sake of simplicity, assume that there is only one tensor (called “X”) to be optimized. Other necessary inputs are “X”’s gradient (called “G”) and “X”’s momentum (called “V”). This Momentum operator maps all these inputs to the new value of “X” (called “X_new”) and its new momentum (called “V_new”).

This operator supports two different momentum algorithms. Set the attribute “mode” to “nesterov” if Nesterov’s momentum is desired. Otherwise, set the attribute “mode” to “standard” to use standard momentum. Computation details are described subsequently.

Let “+”, “-”, “*”, and “/” be element-wise operations with numpy-style broadcasting.

Pseudo code for SG with standard momentum:

  // Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared
  // values of all elements in X.
  G_regularized = norm_coefficient * X + G

  // In the first training iteration, beta should always be 1.
  beta_adjusted = T > 0 ? beta : 1

  // Compute the current momentum based on previous momentum and the current gradient.
  V_new = alpha * V + beta_adjusted * G_regularized

  // Update X.
  X_new = X - R * V_new

Pseudo code for SG with Nesterov’s momentum:

  // Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared
  // values of all elements in X.
  G_regularized = norm_coefficient * X + G

  // In the first training iteration, beta should always be 1.
  beta_adjusted = T > 0 ? beta : 1

  // Compute the current momentum based on previous momentum and the current gradient.
  V_new = alpha * V + beta_adjusted * G_regularized

  // Compute final update direction and then update X.
  X_new = X - R * (G_regularized + alpha * V_new)

If one assigns this operator to optimize multiple inputs, for example “X_1” and “X_2”, the same pseudo code is extended to handle all tensors jointly. More specifically, we can view “X” as a concatenation of “X_1” and “X_2” (of course, their gradients and accumulated gradients should be concatenated too) and then our pseudo code becomes applicable.

-

Attributes

-
    -
  • -
  • -
  • -
  • -
-

Inputs

-

Between 3 and 2147483647 inputs.

-
    -
  • R (heterogeneous)T1: The learning rate.

  • -
  • T (heterogeneous)T2: Update count of “X”. It should be a scalar.

  • -
  • inputs (variadic)T3: It sequentially contains the current values of optimized tensors, then their gradient tensors, and finally their momentum tensors. For example, if two tensors “X_1” and “X_2” are optimized, The expected input list would be [“X_1”, “X_2”, gradient of “X_1”, gradient of “X_2”, momentum of “X_1”, momentum of “X_2”].

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic)T3: It sequentially contains the new values of optimized tensors and then the new values of their momentum tensors. For example, if two tensors “X_1” and “X_2” are optimized, the output list would be [new value of “X_1,” new value of “X_2” new momentum of “X_1”, new momentum of “X_2”].

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(double): Constrain input types to float scalars.

  • -
  • T2 tensor(int64): Constrain input types to 64-bit integer scalars.

  • -
  • T3 tensor(float), tensor(double): Constrain input types to float tensors.

  • -
-
- -
-
-
-
-

OnnxMul#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMul(*args, **kwargs)#
-

Version

-

Onnx name: Mul

-

This version of the operator has been available since version 14.

-

Summary

-

Performs element-wise binary multiplication (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

-

Inputs

-
    -
  • A (heterogeneous)T: First operand.

  • -
  • B (heterogeneous)T: Second operand.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same element type as two inputs

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxMul_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMul_1(*args, **kwargs)#
-

Version

-

Onnx name: Mul

-

This version of the operator has been available since version 1.

-

Summary

-

Performs element-wise binary multiplication (with limited broadcast support).

-

If necessary the right-hand-side argument will be broadcasted to match the shape of the left-hand-side argument. When broadcasting is specified, the second tensor can either be of element size 1 (including a scalar tensor and any tensor with rank equal to or smaller than the first tensor), or having its shape as a contiguous subset of the first tensor’s shape. The starting of the mutually equal shape is specified by the argument “axis”, and if it is not set, suffix matching is assumed. 1-dim expansion doesn’t work yet.

-

For example, the following tensor shapes are supported (with broadcast=1):

-
-

  shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
  shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor
  shape(A) = (2, 3, 4, 5), shape(B) = (5,)
  shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
  shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
  shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

-
-

Attribute broadcast=1 needs to be passed to enable broadcasting.

-

Attributes

-
    -
  • -
  • broadcast: Pass 1 to enable broadcasting Default value is -name: "broadcast" i: 0 type: INT

  • -
  • -
-

Inputs

-
    -
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • -
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxMul_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMul_13(*args, **kwargs)#
-

Version

-

Onnx name: Mul

-

This version of the operator has been available since version 13.

-

Summary

-

Performs element-wise binary multiplication (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First operand.

  • -
  • B (heterogeneous)T: Second operand.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same element type as two inputs

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxMul_14#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMul_14(*args, **kwargs)#
-

Version

-

Onnx name: Mul

-

This version of the operator has been available since version 14.

-

Summary

-

Performs element-wise binary multiplication (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

-

Inputs

-
    -
  • A (heterogeneous)T: First operand.

  • -
  • B (heterogeneous)T: Second operand.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same element type as two inputs

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxMul_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMul_6(*args, **kwargs)#
-

Version

-

Onnx name: Mul

-

This version of the operator has been available since version 6.

-

Summary

-

Performs element-wise binary multiplication (with limited broadcast support).

-

If necessary the right-hand-side argument will be broadcasted to match the shape of the left-hand-side argument. When broadcasting is specified, the second tensor can either be of element size 1 (including a scalar tensor and any tensor with rank equal to or smaller than the first tensor), or having its shape as a contiguous subset of the first tensor’s shape. The starting of the mutually equal shape is specified by the argument “axis”, and if it is not set, suffix matching is assumed. 1-dim expansion doesn’t work yet.

-

For example, the following tensor shapes are supported (with broadcast=1):

-
-

  shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
  shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor
  shape(A) = (2, 3, 4, 5), shape(B) = (5,)
  shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
  shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
  shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

-
-

Attribute broadcast=1 needs to be passed to enable broadcasting.

-

Attributes

-
    -
  • -
  • broadcast: Pass 1 to enable broadcasting Default value is -name: "broadcast" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • -
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxMul_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMul_7(*args, **kwargs)#
-

Version

-

Onnx name: Mul

-

This version of the operator has been available since version 7.

-

Summary

-

Performs element-wise binary multiplication (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First operand.

  • -
  • B (heterogeneous)T: Second operand.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same element type as two inputs

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxMultinomial#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMultinomial(*args, **kwargs)#
-

Version

-

Onnx name: Multinomial

-

This version of the operator has been available since version 7.

Summary

Generate a tensor of samples from a multinomial distribution according to the probabilities of each of the possible outcomes.

-

Attributes

-
    -
  • dtype: (Optional) The data type for the elements of the output tensor, if not specified, we will use int32. Default value is -name: "dtype" i: 6 type: INT

  • -
  • sample_size: Number of times to sample. Default value is -name: "sample_size" i: 1 type: INT

  • -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor with shape [batch_size, class_size], where class_size is the number of all possible outcomes. Each value along the axis zero represents the unnormalized log-probability of each corresponding outcome in a batch.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor with shape [batch_size, sample_size], where sample_size is the number of times to sample. Each value along the axis zero represents the outcome of the corresponding sample in a batch.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • -
  • T2 tensor(int32), tensor(int64): Constrain output types to integral tensors.

  • -
-
- -
-
-
-
-
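The sampling semantics can be sketched in numpy (illustrative only; multinomial_sample is a hypothetical helper that normalizes the unnormalized log-probabilities before drawing class indices):

  import numpy as np

  def multinomial_sample(logits, sample_size, seed=0):
      # logits: [batch_size, class_size] unnormalized log-probabilities.
      # Returns class indices of shape [batch_size, sample_size].
      rng = np.random.default_rng(seed)
      probs = np.exp(logits - logits.max(axis=1, keepdims=True))
      probs /= probs.sum(axis=1, keepdims=True)
      return np.stack([rng.choice(logits.shape[1], size=sample_size, p=p)
                       for p in probs])

  logits = np.log(np.array([[0.1, 0.6, 0.3]]))
  print(multinomial_sample(logits, sample_size=5))   # five indices drawn from {0, 1, 2}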

OnnxMultinomial_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxMultinomial_7(*args, **kwargs)#
-

Version

-

Onnx name: Multinomial

-

This version of the operator has been available since version 7.

Summary

Generate a tensor of samples from a multinomial distribution according to the probabilities of each of the possible outcomes.

-

Attributes

-
    -
  • dtype: (Optional) The data type for the elements of the output tensor, if not specified, we will use int32. Default value is -name: "dtype" i: 6 type: INT

  • -
  • sample_size: Number of times to sample. Default value is -name: "sample_size" i: 1 type: INT

  • -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor with shape [batch_size, class_size], where class_size is the number of all possible outcomes. Each value along the axis zero represents the unnormalized log-probability of each corresponding outcome in a batch.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor with shape [batch_size, sample_size], where sample_size is the number of times to sample. Each value along the axis zero represents the outcome of the corresponding sample in a batch.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • -
  • T2 tensor(int32), tensor(int64): Constrain output types to integral tensors.

  • -
-
- -
-
-
-
-

OnnxNeg#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNeg(*args, **kwargs)#
-

Version

-

Onnx name: Neg

-

This version of the operator has been available since version 13.

Summary

Neg takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where each element has its sign flipped, i.e. y = -x is applied to the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(int32), tensor(int8), tensor(int16), tensor(int64), tensor(float16), tensor(double), tensor(bfloat16): Constrain input and output types to signed numeric tensors.

  • -
-
- -
-
-
-
-

OnnxNeg_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNeg_1(*args, **kwargs)#
-

Version

-

Onnx name: Neg

-

This version of the operator has been available since version 1.

Summary

Neg takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where each element has its sign flipped, i.e. y = -x is applied to the tensor elementwise.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxNeg_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNeg_13(*args, **kwargs)#
-

Version

-

Onnx name: Neg

-

This version of the operator has been available since version 13.

Summary

Neg takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where each element has its sign flipped, i.e. y = -x is applied to the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(int32), tensor(int8), tensor(int16), tensor(int64), tensor(float16), tensor(double), tensor(bfloat16): Constrain input and output types to signed numeric tensors.

  • -
-
- -
-
-
-
-

OnnxNeg_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNeg_6(*args, **kwargs)#
-

Version

-

Onnx name: Neg

-

This version of the operator has been available since version 6.

Summary

Neg takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where each element has its sign flipped, i.e. y = -x is applied to the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(int32), tensor(int8), tensor(int16), tensor(int64), tensor(float16), tensor(double): Constrain input and output types to signed numeric tensors.

  • -
-
- -
-
-
-
-

OnnxNegativeLogLikelihoodLoss#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNegativeLogLikelihoodLoss(*args, **kwargs)#
-

Version

-

Onnx name: NegativeLogLikelihoodLoss

-

This version of the operator has been available since version 13.

Summary

A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss. Its “input” tensor has the shape of (N, C, d1, d2, …, dk) where k >= 0. The “input” tensor contains log-probabilities for input[n, :, d_1, d_2, …, d_k] being in a class of [0, C). The operator’s “target” input tensor has the shape of (N, d1, d2, …, dk). It encodes class labels (one of C classes) or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x … x dk samples. The loss value for input[n, :, d_1, d_2, …, d_k] being classified as class c = target[n][d_1][d_2]…[d_k] is computed as:

loss[n][d_1][d_2]…[d_k] = -input[n][c][d_1][d_2]…[d_k].

When an optional “weight” is provided, the sample loss is calculated as:

loss[n][d_1][d_2]…[d_k] = -input[n][c][d_1][d_2]…[d_k] * weight[c].

loss is zero for the case when target-value equals ignore_index:

loss[n][d_1][d_2]…[d_k] = 0, when target[n][d_1][d_2]…[d_k] = ignore_index

If “reduction” attribute is set to “none”, the operator’s output will be the above loss with shape (N, d1, d2, …, dk). If “reduction” attribute is set to “mean” (the default attribute value), the output loss is (weight) averaged:

mean(loss), if “weight” is not provided,

or, if “weight” is provided,

sum(loss) / sum(weight[target[n][d_1][d_2]…[d_k]]), for all samples.

If “reduction” attribute is set to “sum”, the output is a scalar: sum(loss).

See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.

-

Example 1:

  # negative log likelihood loss, "none" reduction
  N, C, d1 = 2, 3, 2
  input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
           [[0.0, 1.0], [2.0, 2.0], [1.0, 2.0]]]
  target = [[2, 1], [0, 2]]

  loss = np.zeros((N, d1))
  for n in range(N):
      for d_1 in range(d1):
          c = target[n][d_1]
          loss[n][d_1] = -input[n][c][d_1]

  # print(loss)
  # [[-3. -2.]
  #  [-0. -2.]]

Example 2:

  # weighted negative log likelihood loss, sum reduction
  N, C, d1 = 2, 3, 2
  input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
           [[0.0, 1.0], [2.0, 2.0], [1.0, 2.0]]]
  target = [[2, 1], [0, 2]]
  weight = [0.2, 0.3, 0.1]

  loss = np.zeros((N, d1))
  for n in range(N):
      for d_1 in range(d1):
          c = target[n][d_1]
          loss[n][d_1] = -input[n][c][d_1] * weight[c]

  loss = np.sum(loss)
  # print(loss)
  # -1.1

Example 3:

  # weighted negative log likelihood loss, mean reduction
  N, C, d1 = 2, 3, 2
  input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
           [[0.0, 1.0], [2.0, 2.0], [1.0, 2.0]]]
  target = [[2, 1], [0, 2]]
  weight = [0.2, 0.3, 0.1]

  loss = np.zeros((N, d1))
  weight_total = 0
  for n in range(N):
      for d_1 in range(d1):
          c = target[n][d_1]
          loss[n][d_1] = -input[n][c][d_1] * weight[c]
          weight_total = weight_total + weight[c]

  loss = np.sum(loss) / weight_total
  # print(loss)
  # -1.57

-
-

Attributes

-
    -
  • -
  • reduction: Type of reduction to apply to loss: none, sum, mean (default). ‘none’: the output is the loss for each sample. ‘sum’: the output will be summed. ‘mean’: the sum of the output will be divided by the sum of applied weights. Default value is -name: "reduction" s: "mean" type: STRING

  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • input (heterogeneous)T: Input tensor of shape (N, C) or (N, C, d1, d2, …, dk).

  • -
  • target (heterogeneous)Tind: Target tensor of shape (N) or (N, d1, d2, …, dk). Target element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the target values should either be in the range [0, C) or have the value ignore_index.

  • -
  • weight (optional, heterogeneous)T: Optional rescaling weight tensor. If given, it has to be a tensor of size C. Otherwise, it is treated as if having all ones.

  • -
-

Outputs

-
    -
  • loss (heterogeneous)T: The negative log likelihood loss

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input, weight, and output types to floating-point tensors.

  • -
  • Tind tensor(int32), tensor(int64): Constrain target to integer types

  • -
-
- -
-
-
-
-
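For reference, the loss and the weighted “mean” reduction described above can also be written in vectorized numpy (illustrative only; nll_loss is a hypothetical helper and ignore_index is not handled here):

  import numpy as np

  def nll_loss(inp, target, weight=None, reduction="mean"):
      # inp: (N, C, d1, ..., dk) log-probabilities, target: (N, d1, ..., dk) class ids.
      loss = -np.take_along_axis(inp, target[:, None, ...], axis=1).squeeze(1)
      w = np.ones(inp.shape[1]) if weight is None else np.asarray(weight)
      loss = loss * w[target]
      if reduction == "none":
          return loss
      if reduction == "sum":
          return loss.sum()
      return loss.sum() / w[target].sum()   # "mean": weighted average

  inp = np.array([[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
                  [[0.0, 1.0], [2.0, 2.0], [1.0, 2.0]]])
  tgt = np.array([[2, 1], [0, 2]])
  print(nll_loss(inp, tgt, weight=[0.2, 0.3, 0.1]))   # ≈ -1.5714, matching Example 3 above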

OnnxNegativeLogLikelihoodLoss_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNegativeLogLikelihoodLoss_12(*args, **kwargs)#
-

Version

-

Onnx name: NegativeLogLikelihoodLoss

-

This version of the operator has been available since version 12.

Summary

A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss. Its “input” tensor has the shape of (N, C, d1, d2, …, dk) where k >= 0. The “input” tensor contains log-probabilities for input[n, :, d_1, d_2, …, d_k] being in a class of [0, C). The operator’s “target” input tensor has the shape of (N, d1, d2, …, dk). It encodes class labels (one of C classes) or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x … x dk samples. The loss value for input[n, :, d_1, d_2, …, d_k] being classified as class c = target[n][d_1][d_2]…[d_k] is computed as:

-
-

loss[n][d_1][d_2]…[d_k] = -input[n][c][d_1][d_2]…[d_k].

When an optional “weight” is provided, the sample loss is calculated as:

loss[n][d_1][d_2]…[d_k] = -input[n][c][d_1][d_2]…[d_k] * weight[c].

loss is zero for the case when target-value equals ignore_index:

loss[n][d_1][d_2]…[d_k] = 0, when target[n][d_1][d_2]…[d_k] = ignore_index

If “reduction” attribute is set to “none”, the operator’s output will be the above loss with shape (N, d1, d2, …, dk). If “reduction” attribute is set to “mean” (the default attribute value), the output loss is (weight) averaged:

mean(loss), if “weight” is not provided,

or, if “weight” is provided,

sum(loss) / sum(weight[target[n][d_1][d_2]…[d_k]]), for all samples.

If “reduction” attribute is set to “sum”, the output is a scalar: sum(loss).

See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.

Example 1:

  # negative log likelihood loss, "none" reduction
  N, C, d1 = 2, 3, 2
  input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
           [[0.0, 1.0], [2.0, 2.0], [1.0, 2.0]]]
  target = [[2, 1], [0, 2]]

  loss = np.zeros((N, d1))
  for n in range(N):
      for d_1 in range(d1):
          c = target[n][d_1]
          loss[n][d_1] = -input[n][c][d_1]

  # print(loss)
  # [[-3. -2.]
  #  [-0. -2.]]

Example 2:

  # weighted negative log likelihood loss, sum reduction
  N, C, d1 = 2, 3, 2
  input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
           [[0.0, 1.0], [2.0, 2.0], [1.0, 2.0]]]
  target = [[2, 1], [0, 2]]
  weight = [0.2, 0.3, 0.1]

  loss = np.zeros((N, d1))
  for n in range(N):
      for d_1 in range(d1):
          c = target[n][d_1]
          loss[n][d_1] = -input[n][c][d_1] * weight[c]

  loss = np.sum(loss)
  # print(loss)
  # -1.1

Example 3:

  # weighted negative log likelihood loss, mean reduction
  N, C, d1 = 2, 3, 2
  input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
           [[0.0, 1.0], [2.0, 2.0], [1.0, 2.0]]]
  target = [[2, 1], [0, 2]]
  weight = [0.2, 0.3, 0.1]

  loss = np.zeros((N, d1))
  weight_total = 0
  for n in range(N):
      for d_1 in range(d1):
          c = target[n][d_1]
          loss[n][d_1] = -input[n][c][d_1] * weight[c]
          weight_total = weight_total + weight[c]

  loss = np.sum(loss) / weight_total
  # print(loss)
  # -1.57

-
-
-

Attributes

-
    -
  • -
  • reduction: Type of reduction to apply to loss: none, sum, mean (default). ‘none’: the output is the loss for each sample. ‘sum’: the output will be summed. ‘mean’: the sum of the output will be divided by the sum of applied weights. Default value is -name: "reduction" s: "mean" type: STRING

  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • input (heterogeneous)T: Input tensor of shape (N, C) or (N, C, d1, d2, …, dk).

  • -
  • target (heterogeneous)Tind: Target tensor of shape (N) or (N, d1, d2, …, dk). Target element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the target values should either be in the range [0, C) or have the value ignore_index.

  • -
  • weight (optional, heterogeneous)T: Optional rescaling weight tensor. If given, it has to be a tensor of size C. Otherwise, it is treated as if having all ones.

  • -
-

Outputs

-
    -
  • loss (heterogeneous)T: The negative log likelihood loss

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input, weight, and output types to floating-point tensors.

  • -
  • Tind tensor(int32), tensor(int64): Constrain target to integer types

  • -
-
- -
-
-
-
-

OnnxNegativeLogLikelihoodLoss_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNegativeLogLikelihoodLoss_13(*args, **kwargs)#
-

Version

-

Onnx name: NegativeLogLikelihoodLoss

-

This version of the operator has been available since version 13.

Summary

A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss. Its “input” tensor has the shape of (N, C, d1, d2, …, dk) where k >= 0. The “input” tensor contains log-probabilities for input[n, :, d_1, d_2, …, d_k] being in a class of [0, C). The operator’s “target” input tensor has the shape of (N, d1, d2, …, dk). It encodes class labels (one of C classes) or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x … x dk samples. The loss value for input[n, :, d_1, d_2, …, d_k] being classified as class c = target[n][d_1][d_2]…[d_k] is computed as:

-
-

loss[n][d_1][d_2]…[d_k] = -input[n][c][d_1][d_2]…[d_k].

When an optional “weight” is provided, the sample loss is calculated as:

loss[n][d_1][d_2]…[d_k] = -input[n][c][d_1][d_2]…[d_k] * weight[c].

loss is zero for the case when target-value equals ignore_index:

loss[n][d_1][d_2]…[d_k] = 0, when target[n][d_1][d_2]…[d_k] = ignore_index

If “reduction” attribute is set to “none”, the operator’s output will be the above loss with shape (N, d1, d2, …, dk). If “reduction” attribute is set to “mean” (the default attribute value), the output loss is (weight) averaged:

mean(loss), if “weight” is not provided,

or, if “weight” is provided,

sum(loss) / sum(weight[target[n][d_1][d_2]…[d_k]]), for all samples.

If “reduction” attribute is set to “sum”, the output is a scalar: sum(loss).

See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.

-

Example 1:

  # negative log likelihood loss, "none" reduction
  N, C, d1 = 2, 3, 2
  input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
           [[0.0, 1.0], [2.0, 2.0], [1.0, 2.0]]]
  target = [[2, 1], [0, 2]]

  loss = np.zeros((N, d1))
  for n in range(N):
      for d_1 in range(d1):
          c = target[n][d_1]
          loss[n][d_1] = -input[n][c][d_1]

  # print(loss)
  # [[-3. -2.]
  #  [-0. -2.]]

Example 2:

  # weighted negative log likelihood loss, sum reduction
  N, C, d1 = 2, 3, 2
  input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
           [[0.0, 1.0], [2.0, 2.0], [1.0, 2.0]]]
  target = [[2, 1], [0, 2]]
  weight = [0.2, 0.3, 0.1]

  loss = np.zeros((N, d1))
  for n in range(N):
      for d_1 in range(d1):
          c = target[n][d_1]
          loss[n][d_1] = -input[n][c][d_1] * weight[c]

  loss = np.sum(loss)
  # print(loss)
  # -1.1

Example 3:

  # weighted negative log likelihood loss, mean reduction
  N, C, d1 = 2, 3, 2
  input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
           [[0.0, 1.0], [2.0, 2.0], [1.0, 2.0]]]
  target = [[2, 1], [0, 2]]
  weight = [0.2, 0.3, 0.1]

  loss = np.zeros((N, d1))
  weight_total = 0
  for n in range(N):
      for d_1 in range(d1):
          c = target[n][d_1]
          loss[n][d_1] = -input[n][c][d_1] * weight[c]
          weight_total = weight_total + weight[c]

  loss = np.sum(loss) / weight_total
  # print(loss)
  # -1.57

-
-

Attributes

-
    -
  • -
  • reduction: Type of reduction to apply to loss: none, sum, mean (default). ‘none’: the output is the loss for each sample. ‘sum’: the output will be summed. ‘mean’: the sum of the output will be divided by the sum of applied weights. Default value is -name: "reduction" s: "mean" type: STRING

  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • input (heterogeneous)T: Input tensor of shape (N, C) or (N, C, d1, d2, …, dk).

  • -
  • target (heterogeneous)Tind: Target tensor of shape (N) or (N, d1, d2, …, dk). Target element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the target values should either be in the range [0, C) or have the value ignore_index.

  • -
  • weight (optional, heterogeneous)T: Optional rescaling weight tensor. If given, it has to be a tensor of size C. Otherwise, it is treated as if having all ones.

  • -
-

Outputs

-
    -
  • loss (heterogeneous)T: The negative log likelihood loss

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input, weight, and output types to floating-point tensors.

  • -
  • Tind tensor(int32), tensor(int64): Constrain target to integer types

  • -
-
- -
-
-
-
-

OnnxNonMaxSuppression#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNonMaxSuppression(*args, **kwargs)#
-

Version

-

Onnx name: NonMaxSuppression

-

This version of the operator has been available since version 11.

Summary

Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes with score less than score_threshold are removed. Bounding box format is indicated by attribute center_point_box. Note that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflecting the coordinate system results in the same boxes being selected by the algorithm. The selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation.

-

Attributes

-
    -
  • center_point_box: Integer indicate the format of the box data. The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2] where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models. 1 - the box data is supplied as [x_center, y_center, width, height]. Mostly used for Pytorch models. Default value is -name: "center_point_box" i: 0 type: INT

  • -
-

Inputs

-

Between 2 and 5 inputs.

-
    -
  • boxes (heterogeneous)tensor(float): An input tensor with shape [num_batches, spatial_dimension, 4]. The single box data format is indicated by center_point_box.

  • -
  • scores (heterogeneous)tensor(float): An input tensor with shape [num_batches, num_classes, spatial_dimension]

  • -
  • max_output_boxes_per_class (optional, heterogeneous)tensor(int64): Integer representing the maximum number of boxes to be selected per batch per class. It is a scalar. Default to 0, which means no output.

  • -
  • iou_threshold (optional, heterogeneous)tensor(float): Float representing the threshold for deciding whether boxes overlap too much with respect to IOU. It is scalar. Value range [0, 1]. Default to 0.

  • -
  • score_threshold (optional, heterogeneous)tensor(float): Float representing the threshold for deciding when to remove boxes based on score. It is a scalar.

  • -
-

Outputs

-
    -
  • selected_indices (heterogeneous)tensor(int64): selected indices from the boxes tensor. [num_selected_indices, 3], the selected index format is [batch_index, class_index, box_index].

  • -
-
- -
-
-
-
-
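A greedy selection matching the description above can be sketched in numpy for a single batch and class (illustrative only; nms_single_class is a hypothetical helper, boxes are assumed to be in the default [y1, x1, y2, x2] corner format, and the real operator returns [batch_index, class_index, box_index] triples across all batches and classes):

  import numpy as np

  def nms_single_class(boxes, scores, iou_threshold=0.5, score_threshold=0.0,
                       max_output_boxes=None):
      # boxes: (S, 4) as [y1, x1, y2, x2]; scores: (S,). Greedy selection by score.
      def iou(a, b):
          y1, x1 = np.maximum(a[:2], b[:2])
          y2, x2 = np.minimum(a[2:], b[2:])
          inter = max(0.0, y2 - y1) * max(0.0, x2 - x1)
          area = lambda t: (t[2] - t[0]) * (t[3] - t[1])
          return inter / (area(a) + area(b) - inter)

      keep = []
      for i in np.argsort(-scores):
          if max_output_boxes is not None and len(keep) >= max_output_boxes:
              break
          if scores[i] < score_threshold:
              continue
          if all(iou(boxes[i], boxes[j]) <= iou_threshold for j in keep):
              keep.append(int(i))
      return keep

  boxes = np.array([[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, 2, 1, 3]], dtype=np.float32)
  scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)
  print(nms_single_class(boxes, scores, iou_threshold=0.5))   # [0, 2]; box 1 is suppressed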

OnnxNonMaxSuppression_10#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNonMaxSuppression_10(*args, **kwargs)#
-

Version

-

Onnx name: NonMaxSuppression

-

This version of the operator has been available since version 10.

Summary

Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes with score less than score_threshold are removed. Bounding box format is indicated by attribute center_point_box. Note that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflecting the coordinate system results in the same boxes being selected by the algorithm. The selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation.

-

Attributes

-
    -
  • center_point_box: Integer indicate the format of the box data. The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2] where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models. 1 - the box data is supplied as [x_center, y_center, width, height]. Mostly used for Pytorch models. Default value is -name: "center_point_box" i: 0 type: INT

  • -
-

Inputs

-

Between 2 and 5 inputs.

-
    -
  • boxes (heterogeneous)tensor(float): An input tensor with shape [num_batches, spatial_dimension, 4]. The single box data format is indicated by center_point_box.

  • -
  • scores (heterogeneous)tensor(float): An input tensor with shape [num_batches, num_classes, spatial_dimension]

  • -
  • max_output_boxes_per_class (optional, heterogeneous)tensor(int64): Integer representing the maximum number of boxes to be selected per batch per class. It is a scalar. Default to 0, which means no output.

  • -
  • iou_threshold (optional, heterogeneous)tensor(float): Float representing the threshold for deciding whether boxes overlap too much with respect to IOU. It is scalar. Value range [0, 1]. Default to 0.

  • -
  • score_threshold (optional, heterogeneous)tensor(float): Float representing the threshold for deciding when to remove boxes based on score. It is a scalar.

  • -
-

Outputs

-
    -
  • selected_indices (heterogeneous)tensor(int64): selected indices from the boxes tensor. [num_selected_indices, 3], the selected index format is [batch_index, class_index, box_index].

  • -
-
- -
-
-
-
-

OnnxNonMaxSuppression_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNonMaxSuppression_11(*args, **kwargs)#
-

Version

-

Onnx name: NonMaxSuppression

-

This version of the operator has been available since version 11.

Summary

Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes with score less than score_threshold are removed. Bounding box format is indicated by attribute center_point_box. Note that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflecting the coordinate system results in the same boxes being selected by the algorithm. The selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation.

-

Attributes

-
    -
  • center_point_box: Integer indicate the format of the box data. The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2] where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models. 1 - the box data is supplied as [x_center, y_center, width, height]. Mostly used for Pytorch models. Default value is -name: "center_point_box" i: 0 type: INT

  • -
-

Inputs

-

Between 2 and 5 inputs.

-
    -
  • boxes (heterogeneous)tensor(float): An input tensor with shape [num_batches, spatial_dimension, 4]. The single box data format is indicated by center_point_box.

  • -
  • scores (heterogeneous)tensor(float): An input tensor with shape [num_batches, num_classes, spatial_dimension]

  • -
  • max_output_boxes_per_class (optional, heterogeneous)tensor(int64): Integer representing the maximum number of boxes to be selected per batch per class. It is a scalar. Defaults to 0, which means no output.

  • -
  • iou_threshold (optional, heterogeneous)tensor(float): Float representing the threshold for deciding whether boxes overlap too much with respect to IOU. It is a scalar with value range [0, 1]. Defaults to 0.

  • -
  • score_threshold (optional, heterogeneous)tensor(float): Float representing the threshold for deciding when to remove boxes based on score. It is a scalar.

  • -
-

Outputs

-
    -
  • selected_indices (heterogeneous)tensor(int64): selected indices from the boxes tensor. [num_selected_indices, 3], the selected index format is [batch_index, class_index, box_index].

  • -
-
- -
-
-
-
-

OnnxNonZero#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNonZero(*args, **kwargs)#
-

Version

-

Onnx name: NonZero

-

This version of the operator has been available since version 13.

-

Summary

-

Returns the indices of the elements that are non-zero (in row-major order, by dimension). NonZero behaves similarly to numpy.nonzero: https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html

-

Inputs

-
    -
  • X (heterogeneous)T: input

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(int64): output

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to all tensor types.

  • -
-
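A minimal usage sketch, assuming the OnnxNonZero(...).to_onnx(...) construction pattern used by the algebra classes in this documentation and an onnxruntime backend; the result has the same [rank, n_nonzero] layout as numpy.nonzero stacked row-wise.

import numpy as np
from onnxruntime import InferenceSession
from skl2onnx.algebra.onnx_ops import OnnxNonZero

x = np.array([[1.0, 0.0],
              [0.0, 2.0]], dtype=np.float32)

# Single-node graph: Y = NonZero(X).
node = OnnxNonZero('X', op_version=13, output_names=['Y'])
onx = node.to_onnx({'X': x})

sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
print(sess.run(None, {'X': x})[0])   # indices of non-zero elements, one row per dimension
print(np.vstack(np.nonzero(x)))      # NumPy equivalent of the same layout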
- -
-
-
-
-

OnnxNonZero_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNonZero_13(*args, **kwargs)#
-

Version

-

Onnx name: NonZero

-

This version of the operator has been available since version 13.

-

Summary

-

Returns the indices of the elements that are non-zero (in row-major order, by dimension). NonZero behaves similarly to numpy.nonzero: https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html

-

Inputs

-
    -
  • X (heterogeneous)T: input

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(int64): output

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to all tensor types.

  • -
-
- -
-
-
-
-

OnnxNonZero_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNonZero_9(*args, **kwargs)#
-

Version

-

Onnx name: NonZero

-

This version of the operator has been available since version 9.

-

Summary

-

Returns the indices of the elements that are non-zero (in row-major order, by dimension). NonZero behaves similarly to numpy.nonzero: https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html

-

Inputs

-
    -
  • X (heterogeneous)T: input

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(int64): output

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to all tensor types.

  • -
-
- -
-
-
-
-

OnnxNormalizer#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNormalizer(*args, **kwargs)#
-

Version

-

Onnx name: Normalizer

-

This version of the operator has been available since version 1 of domain ai.onnx.ml.

-

Summary

-
-

Normalize the input. There are three normalization modes, which have the corresponding formulas, defined using element-wise infix operators ‘/’ and ‘^’ and tensor-wide functions ‘max’ and ‘sum’:

-

Max: Y = X / max(X)

-

L1: Y = X / sum(X)

-

L2: Y = sqrt(X^2 / sum(X^2))

-

In all modes, if the divisor is zero, Y == X.

-

For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row of the batch is normalized independently.

-
-

Attributes

-
    -
  • norm: One of ‘MAX’, ‘L1’, ‘L2’. Default value is name: "norm" s: "MAX" type: STRING

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Data to be encoded, a tensor of shape [N,C] or [C]

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(float): Encoded output data

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type.

  • -
-
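A literal NumPy transcription of the three formulas above for an [N, C] batch, as an illustration only; runtimes may differ in details such as how signs are handled, and when a divisor is zero the row is left unchanged.

import numpy as np

X = np.array([[1.0, -2.0, 2.0],
              [0.0,  4.0, 3.0]], dtype=np.float32)

# Normalization is applied along the C axis, i.e. each row independently.
max_norm = X / np.max(X, axis=1, keepdims=True)
l1_norm = X / np.sum(X, axis=1, keepdims=True)
l2_norm = np.sqrt(X ** 2 / np.sum(X ** 2, axis=1, keepdims=True))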
- -
-
-
-
-

OnnxNormalizer_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNormalizer_1(*args, **kwargs)#
-

Version

-

Onnx name: Normalizer

-

This version of the operator has been available since version 1 of domain ai.onnx.ml.

-

Summary

-
-

Normalize the input. There are three normalization modes, which have the corresponding formulas, -defined using element-wise infix operators ‘/’ and ‘^’ and tensor-wide functions ‘max’ and ‘sum’:

-

Max: Y = X / max(X)

-

L1: Y = X / sum(X)

-

L2: Y = sqrt(X^2 / sum(X^2))

-

In all modes, if the divisor is zero, Y == X.

-

For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row -of the batch is normalized independently.

-
-

Attributes

-
    -
  • norm: One of ‘MAX,’ ‘L1,’ ‘L2’ Default value is -name: "norm" s: "MAX" type: STRING

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Data to be encoded, a tensor of shape [N,C] or [C]

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(float): Encoded output data

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type.

  • -
-
- -
-
-
-
-

OnnxNot#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNot(*args, **kwargs)#
-

Version

-

Onnx name: Not

-

This version of the operator has been available since version 1.

-

Summary

-

Returns the negation of the input tensor element-wise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(bool): Constrains input/output to boolean tensors.

  • -
-
- -
-
-
-
-

OnnxNot_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxNot_1(*args, **kwargs)#
-

Version

-

Onnx name: Not

-

This version of the operator has been available since version 1.

-

Summary

-

Returns the negation of the input tensor element-wise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(bool): Constrains input/output to boolean tensors.

  • -
-
- -
-
-
-
-

OnnxOneHot#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOneHot(*args, **kwargs)#
-

Version

-

Onnx name: OneHot

-

This version of the operator has been available since version 11.

-

Summary

-

Produces a one-hot tensor based on inputs. The locations represented by the index values in the ‘indices’ input tensor will have ‘on_value’ and the other locations will have ‘off_value’ in the output tensor, where ‘on_value’ and ‘off_value’ are specified as part of the required input argument ‘values’, which is a two-element tensor of format [off_value, on_value]. The rank of the output tensor will be one greater than the rank of the input tensor. The additional dimension is for the one-hot representation. The additional dimension will be inserted at the position specified by ‘axis’. If ‘axis’ is not specified, the additional dimension will be inserted as the innermost dimension, i.e. axis=-1. The size of the additional dimension is specified by the required scalar input ‘depth’. The type of the output tensor is the same as the type of the ‘values’ input. Any entries in the ‘indices’ input tensor with values outside the range [-depth, depth-1] will result in one-hot representation with all ‘off_value’ values in the output tensor.

-

when axis = 0: output[input[i, j, k], i, j, k] = 1 for all i, j, k and 0 otherwise.

-

when axis = -1: output[i, j, k, input[i, j, k]] = 1 for all i, j, k and 0 otherwise.

-

Attributes

-
    -
  • axis: (Optional) Axis along which the one-hot representation is added. Default: axis=-1. axis=-1 means that the additional dimension will be inserted as the innermost/last dimension in the output tensor. A negative value means counting dimensions from the back. The accepted range is [-r-1, r] where r = rank(indices). Default value is name: "axis" i: -1 type: INT

  • -
-

Inputs

-
    -
  • indices (heterogeneous)T1: Input tensor containing indices. Any entries in the ‘indices’ input tensor with values outside the range [-depth, depth-1] will result in one-hot representation with all ‘off_value’ values in the output tensor. In case ‘indices’ is of non-integer type, the values will be cast to int64 before use.

  • -
  • depth (heterogeneous)T2: Scalar specifying the number of classes in the one-hot tensor. This is also the size of the one-hot dimension (specified by the ‘axis’ attribute) added on in the output tensor. The values in the ‘indices’ input tensor are expected to be in the range [-depth, depth-1]. In case ‘depth’ is of non-integer type, it will be cast to int64 before use.

  • -
  • values (heterogeneous)T3: Rank 1 tensor containing exactly two elements, in the format [off_value, on_value], where ‘on_value’ is the value used for filling locations specified in ‘indices’ input tensor, and ‘off_value’ is the value used for filling locations other than those specified in ‘indices’ input tensor.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T3: Tensor of rank one greater than the input tensor ‘indices’, i.e. rank(output) = rank(indices) + 1. The data type of the elements of the output tensor is the same as the type of the ‘values’ input.

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input to only numeric types.

  • -
  • T2 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input to only numeric types.

  • -
  • T3 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • -
-
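The axis=-1 case described above can be sketched with NumPy as follows; the helper one_hot_last_axis is hypothetical and only illustrates the semantics for 1-D indices.

import numpy as np

def one_hot_last_axis(indices, depth, values):
    # values = [off_value, on_value]; indices outside [-depth, depth-1] give an all-off row.
    off_value, on_value = values
    idx = np.asarray(indices, dtype=np.int64)
    idx = np.where(idx < 0, idx + depth, idx)          # negative indices count from the end
    out = np.full(idx.shape + (depth,), off_value, dtype=np.asarray(values).dtype)
    valid = (idx >= 0) & (idx < depth)
    out[np.nonzero(valid) + (idx[valid],)] = on_value  # rank(output) = rank(indices) + 1
    return out

print(one_hot_last_axis([1, 9, -2, 4], depth=8,
                        values=np.array([0.0, 1.0], dtype=np.float32)))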
- -
-
-
-
-

OnnxOneHotEncoder#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOneHotEncoder(*args, **kwargs)#
-

Version

-

Onnx name: OneHotEncoder

-

This version of the operator has been available since version 1 of domain ai.onnx.ml.

-

Summary

-

Replace each input element with an array of ones and zeros, where a single one is placed at the index of the category that was passed in. The total category count will determine the size of the extra dimension of the output array Y.

-

For example, if we pass a tensor with a single value of 4, and a category count of 8, the output will be a tensor with [0,0,0,0,1,0,0,0].

-

This operator assumes every input feature is from the same set of categories.

-

If the input is a tensor of float, int32, or double, the data will be cast to integers and the cats_int64s category list will be used for the lookups.

-

Attributes

-
    -
  • -
  • -
  • zeros: If true and a category is not present, the operator will return all zeros; if false and a category is not found, the operator will fail. Default value is name: "zeros" i: 1 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Data to be encoded.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(float): Encoded output data, having one more dimension than X.

  • -
-

Type Constraints

-
    -
  • T tensor(string), tensor(int64), tensor(int32), tensor(float), tensor(double): The input must be a tensor of a numeric type.

  • -
-
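As an illustration of the lookup described above (the category values below are made up; cats_int64s is the integer category attribute, and zeros=1 maps unknown categories to an all-zero row):

import numpy as np

cats_int64s = [3, 7, 9]                   # hypothetical category list
X = np.array([7, 3, 5], dtype=np.int64)   # 5 is not a known category

# Output Y has one extra dimension of size len(cats_int64s).
Y = np.zeros((X.shape[0], len(cats_int64s)), dtype=np.float32)
for i, v in enumerate(X):
    if v in cats_int64s:
        Y[i, cats_int64s.index(v)] = 1.0
print(Y)   # [[0. 1. 0.] [1. 0. 0.] [0. 0. 0.]]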
- -
-
-
-
-

OnnxOneHotEncoder_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOneHotEncoder_1(*args, **kwargs)#
-

Version

-

Onnx name: OneHotEncoder

-

This version of the operator has been available since version 1 of domain ai.onnx.ml.

-

Summary

-

Replace each input element with an array of ones and zeros, where a single -one is placed at the index of the category that was passed in. The total category count -will determine the size of the extra dimension of the output array Y.

-

For example, if we pass a tensor with a single value of 4, and a category count of 8, -the output will be a tensor with [0,0,0,0,1,0,0,0].

-

This operator assumes every input feature is from the same set of categories.

-

If the input is a tensor of float, int32, or double, the data will be cast -to integers and the cats_int64s category list will be used for the lookups.

-

Attributes

-
    -
  • -
  • -
  • zeros: If true and a category is not present, the operator will return all zeros; if false and a category is not found, the operator will fail. Default value is name: "zeros" i: 1 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Data to be encoded.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(float): Encoded output data, having one more dimension than X.

  • -
-

Type Constraints

-
    -
  • T tensor(string), tensor(int64), tensor(int32), tensor(float), tensor(double): The input must be a tensor of a numeric type.

  • -
-
- -
-
-
-
-

OnnxOneHot_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOneHot_11(*args, **kwargs)#
-

Version

-

Onnx name: OneHot

-

This version of the operator has been available since version 11.

-

Summary

-

Produces a one-hot tensor based on inputs. The locations represented by the index values in the ‘indices’ input tensor will have ‘on_value’ and the other locations will have ‘off_value’ in the output tensor, where ‘on_value’ and ‘off_value’ are specified as part of the required input argument ‘values’, which is a two-element tensor of format [off_value, on_value]. The rank of the output tensor will be one greater than the rank of the input tensor. The additional dimension is for the one-hot representation. The additional dimension will be inserted at the position specified by ‘axis’. If ‘axis’ is not specified, the additional dimension will be inserted as the innermost dimension, i.e. axis=-1. The size of the additional dimension is specified by the required scalar input ‘depth’. The type of the output tensor is the same as the type of the ‘values’ input. Any entries in the ‘indices’ input tensor with values outside the range [-depth, depth-1] will result in one-hot representation with all ‘off_value’ values in the output tensor.

-

when axis = 0: -output[input[i, j, k], i, j, k] = 1 for all i, j, k and 0 otherwise.

-

when axis = -1: -output[i, j, k, input[i, j, k]] = 1 for all i, j, k and 0 otherwise.

-

Attributes

-
    -
  • axis: (Optional) Axis along which the one-hot representation is added. Default: axis=-1. axis=-1 means that the additional dimension will be inserted as the innermost/last dimension in the output tensor. A negative value means counting dimensions from the back. The accepted range is [-r-1, r] where r = rank(indices). Default value is name: "axis" i: -1 type: INT

  • -
-

Inputs

-
    -
  • indices (heterogeneous)T1: Input tensor containing indices. Any entries in the ‘indices’ input tensor with values outside the range [-depth, depth-1] will result in one-hot representation with all ‘off_value’ values in the output tensor.In case ‘indices’ is of non-integer type, the values will be casted to int64 before use.

  • -
  • depth (heterogeneous)T2: Scalar specifying the number of classes in one-hot tensor. This is also the size of the one-hot dimension (specified by ‘axis’ attribute) added on in the output tensor. The values in the ‘indices’ input tensor are expected to be in the range [-depth, depth-1]. In case ‘depth’ is of non-integer type, it will be casted to int64 before use.

  • -
  • values (heterogeneous)T3: Rank 1 tensor containing exactly two elements, in the format [off_value, on_value], where ‘on_value’ is the value used for filling locations specified in ‘indices’ input tensor, and ‘off_value’ is the value used for filling locations other than those specified in ‘indices’ input tensor.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T3: Tensor of rank one greater than input tensor ‘indices’, i.e. rank(output) = rank(indices) + 1. The data type for the elements of the output tensor is the same as the type of input ‘values’ is used.

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input to only numeric types.

  • -
  • T2 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input to only numeric types.

  • -
  • T3 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • -
-
- -
-
-
-
-

OnnxOneHot_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOneHot_9(*args, **kwargs)#
-

Version

-

Onnx name: OneHot

-

This version of the operator has been available since version 9.

-

Summary

-

Produces a one-hot tensor based on inputs. The locations represented by the index values in the ‘indices’ input tensor will have ‘on_value’ and the other locations will have ‘off_value’ in the output tensor, where ‘on_value’ and ‘off_value’ are specified as part of the required input argument ‘values’, which is a two-element tensor of format [off_value, on_value]. The rank of the output tensor will be one greater than the rank of the input tensor. The additional dimension is for the one-hot representation. The additional dimension will be inserted at the position specified by ‘axis’. If ‘axis’ is not specified, the additional dimension will be inserted as the innermost dimension, i.e. axis=-1. The size of the additional dimension is specified by the required scalar input ‘depth’. The type of the output tensor is the same as the type of the ‘values’ input. Any entries in the ‘indices’ input tensor with values outside the range [0, depth) will result in one-hot representation with all ‘off_value’ values in the output tensor.

-

Attributes

-
    -
  • axis: (Optional) Axis along which the one-hot representation is added. Default: axis=-1. axis=-1 means that the additional dimension will be inserted as the innermost/last dimension in the output tensor. Default value is name: "axis" i: -1 type: INT

  • -
-

Inputs

-
    -
  • indices (heterogeneous)T1: Input tensor containing indices. The values must be non-negative integers. Any entries in the ‘indices’ input tensor with values outside the range [0, depth) will result in one-hot representation with all ‘off_value’ values in the output tensor.In case ‘indices’ is of non-integer type, the values will be casted to int64 before use.

  • -
  • depth (heterogeneous)T2: Scalar specifying the number of classes in one-hot tensor. This is also the size of the one-hot dimension (specified by ‘axis’ attribute) added on in the output tensor. The values in the ‘indices’ input tensor are expected to be in the range [0, depth). In case ‘depth’ is of non-integer type, it will be casted to int64 before use.

  • -
  • values (heterogeneous)T3: Rank 1 tensor containing exactly two elements, in the format [off_value, on_value], where ‘on_value’ is the value used for filling locations specified in ‘indices’ input tensor, and ‘off_value’ is the value used for filling locations other than those specified in ‘indices’ input tensor.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T3: Tensor of rank one greater than input tensor ‘indices’, i.e. rank(output) = rank(indices) + 1. The data type for the elements of the output tensor is the same as the type of input ‘values’ is used.

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input to only numeric types.

  • -
  • T2 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input to only numeric types.

  • -
  • T3 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • -
-
- -
-
-
-
-

OnnxOptional#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOptional(*args, **kwargs)#
-

Version

-

Onnx name: Optional

-

This version of the operator has been available since version 15.

-

Summary

-

Constructs an optional-type value containing either an empty optional of a certain type specified by the attribute, or a non-empty value containing the input element.

-

Attributes

-
    -
  • -
-

Inputs

-

Between 0 and 1 inputs.

-
    -
  • input (optional, heterogeneous)V: The input element.

  • -
-

Outputs

-
    -
  • output (heterogeneous)O: The optional output enclosing the input element.

  • -
-

Type Constraints

-
    -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrains input type to all tensor and sequence types.

  • -
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrains output type to all optional tensor or optional sequence types.

  • -
-
- -
-
-
-
-

OnnxOptionalGetElement#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOptionalGetElement(*args, **kwargs)#
-

Version

-

Onnx name: OptionalGetElement

-

This version of the operator has been available since version 15.

-

Summary

-

Outputs the element in the optional-type input. It is an error if the input value does not have an element, and the behavior is undefined in this case.

-

Inputs

-
    -
  • input (heterogeneous)O: The optional input.

  • -
-

Outputs

-
    -
  • output (heterogeneous)V: Output element in the optional input.

  • -
-

Type Constraints

-
    -
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrains input type to optional tensor and optional sequence types.

  • -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output type to all tensor or sequence types.

  • -
-
- -
-
-
-
-

OnnxOptionalGetElement_15#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOptionalGetElement_15(*args, **kwargs)#
-

Version

-

Onnx name: OptionalGetElement

-

This version of the operator has been available since version 15.

-

Summary

-

Outputs the element in the optional-type input. It is an error if the input value does not have an element -and the behavior is undefined in this case.

-

Inputs

-
    -
  • input (heterogeneous)O: The optional input.

  • -
-

Outputs

-
    -
  • output (heterogeneous)V: Output element in the optional input.

  • -
-

Type Constraints

-
    -
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrains input type to optional tensor and optional sequence types.

  • -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output type to all tensor or sequence types.

  • -
-
- -
-
-
-
-

OnnxOptionalHasElement#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOptionalHasElement(*args, **kwargs)#
-

Version

-

Onnx name: OptionalHasElement

-

This version of the operator has been available since version 15.

-

Summary

-

Returns true if the optional-type input contains an element. If it is an empty optional-type, this op returns false.

-

Inputs

-
    -
  • input (heterogeneous)O: The optional input.

  • -
-

Outputs

-
    -
  • output (heterogeneous)B: A scalar boolean tensor. If true, it indicates that optional-type input contains an element. Otherwise, it is empty.

  • -
-

Type Constraints

-
    -
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrains input type to optional tensor and optional sequence types.

  • -
  • B tensor(bool): Constrains output to a boolean tensor.

  • -
-
- -
-
-
-
-

OnnxOptionalHasElement_15#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOptionalHasElement_15(*args, **kwargs)#
-

Version

-

Onnx name: OptionalHasElement

-

This version of the operator has been available since version 15.

-

Summary

-

Returns true if the optional-type input contains an element. If it is an empty optional-type, this op returns false.

-

Inputs

-
    -
  • input (heterogeneous)O: The optional input.

  • -
-

Outputs

-
    -
  • output (heterogeneous)B: A scalar boolean tensor. If true, it indicates that optional-type input contains an element. Otherwise, it is empty.

  • -
-

Type Constraints

-
    -
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrains input type to optional tensor and optional sequence types.

  • -
  • B tensor(bool): Constrains output to a boolean tensor.

  • -
-
- -
-
-
-
-

OnnxOptional_15#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOptional_15(*args, **kwargs)#
-

Version

-

Onnx name: Optional

-

This version of the operator has been available since version 15.

-

Summary

-

Constructs an optional-type value containing either an empty optional of a certain type specified by the attribute, -or a non-empty value containing the input element.

-

Attributes

-
    -
  • -
-

Inputs

-

Between 0 and 1 inputs.

-
    -
  • input (optional, heterogeneous)V: The input element.

  • -
-

Outputs

-
    -
  • output (heterogeneous)O: The optional output enclosing the input element.

  • -
-

Type Constraints

-
    -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrains input type to all tensor and sequence types.

  • -
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrains output type to all optional tensor or optional sequence types.

  • -
-
- -
-
-
-
-

OnnxOr#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOr(*args, **kwargs)#
-

Version

-

Onnx name: Or

-

This version of the operator has been available since version 7.

-

Summary

-

Returns the tensor resulting from performing the ‘or’ logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(bool): Constrains input to boolean tensor.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
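The Numpy-style broadcasting mentioned above behaves like numpy.logical_or; a small illustration:

import numpy as np

a = np.array([[True, False],
              [False, False]])   # shape (2, 2)
b = np.array([True, False])      # shape (2,), broadcast across the rows of a

print(np.logical_or(a, b))
# [[ True False]
#  [ True False]]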
- -
-
-
-
-

OnnxOr_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOr_1(*args, **kwargs)#
-

Version

-

Onnx name: Or

-

This version of the operator has been available since version 1.

-

Summary

-

Returns the tensor resulting from performing the ‘or’ logical operation elementwise on the input tensors A and B.

-

If broadcasting is enabled, the right-hand-side argument will be broadcast to match the shape of the left-hand-side argument. See the documentation of Add for a detailed description of the broadcasting rules.

-

Attributes

-
    -
  • -
  • broadcast: Enable broadcasting. Default value is name: "broadcast" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: Left input tensor for the logical operator.

  • -
  • B (heterogeneous)T: Right input tensor for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(bool): Constrains input to boolean tensor.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxOr_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxOr_7(*args, **kwargs)#
-

Version

-

Onnx name: Or

-

This version of the operator has been available since version 7.

-

Summary

-

Returns the tensor resulting from performing the ‘or’ logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First input operand for the logical operator.

  • -
  • B (heterogeneous)T: Second input operand for the logical operator.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T1: Result tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(bool): Constrains input to boolean tensor.

  • -
  • T1 tensor(bool): Constrains output to boolean tensor.

  • -
-
- -
-
-
-
-

OnnxPRelu#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPRelu(*args, **kwargs)#
-

Version

-

Onnx name: PRelu

-

This version of the operator has been available since version 16.

-

Summary

-

PRelu takes an input data tensor (Tensor<T>) and a slope tensor as input, and produces one output data tensor (Tensor<T>) where the function f(x) = slope * x for x < 0, f(x) = x for x >= 0 is applied to the data tensor elementwise.

-

History: Version 16 adds bfloat16 to the types allowed. This operator supports unidirectional broadcasting (tensor slope should be unidirectionally broadcastable to input tensor X); for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
  • slope (heterogeneous)T: Slope tensor. The shape of slope can be smaller than the first input X; if so, its shape must be unidirectionally broadcastable to X.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor (same size as X)

  • -
-

Type Constraints

-
    -
  • T tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64): Constrain input and output types to float/int tensors.

  • -
-
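A NumPy sketch of the elementwise definition above, with a per-channel slope broadcast against the input (illustration only; the helper prelu is hypothetical):

import numpy as np

def prelu(x, slope):
    # f(x) = slope * x for x < 0, f(x) = x for x >= 0, applied elementwise.
    return np.where(x < 0, slope * x, x)

x = np.array([[-1.0, 2.0],
              [-3.0, 4.0]], dtype=np.float32)
slope = np.array([0.1, 0.2], dtype=np.float32)   # unidirectionally broadcastable to x
print(prelu(x, slope))   # [[-0.1  2. ] [-0.3  4. ]]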
- -
-
-
-
-

OnnxPRelu_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPRelu_1(*args, **kwargs)#
-

Version

-

Onnx name: PRelu

-

This version of the operator has been available since version 1.

-

Summary

-

PRelu takes input data (Tensor<T>) and slope tensor as input, and produces one -output data (Tensor<T>) where the function f(x) = slope * x for x < 0, -f(x) = x for x >= 0., is applied to the data tensor elementwise.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
  • slope (heterogeneous)T: Slope tensor. If slope is of size 1, the value is shared across different channels.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxPRelu_16#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPRelu_16(*args, **kwargs)#
-

Version

-

Onnx name: PRelu

-

This version of the operator has been available since version 16.

-

Summary

-

PRelu takes input data (Tensor<T>) and slope tensor as input, and produces one -output data (Tensor<T>) where the function f(x) = slope * x for x < 0, -f(x) = x for x >= 0., is applied to the data tensor elementwise.

-

History -- Version 16 adds bfloat16 to the types allowed. -This operator supports unidirectional broadcasting (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
  • slope (heterogeneous)T: Slope tensor. The shape of slope can be smaller than the first input X; if so, its shape must be unidirectionally broadcastable to X.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor (same size as X)

  • -
-

Type Constraints

-
    -
  • T tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64): Constrain input and output types to float/int tensors.

  • -
-
- -
-
-
-
-

OnnxPRelu_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPRelu_6(*args, **kwargs)#
-

Version

-

Onnx name: PRelu

-

This version of the operator has been available since version 6.

-

Summary

-

PRelu takes input data (Tensor<T>) and slope tensor as input, and produces one -output data (Tensor<T>) where the function f(x) = slope * x for x < 0, -f(x) = x for x >= 0., is applied to the data tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
  • slope (heterogeneous)T: Slope tensor. If slope is of size 1, the value is shared across different channels.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxPRelu_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPRelu_7(*args, **kwargs)#
-

Version

-

Onnx name: PRelu

-

This version of the operator has been available since version 7.

-

Summary

-

PRelu takes input data (Tensor<T>) and slope tensor as input, and produces one -output data (Tensor<T>) where the function f(x) = slope * x for x < 0, -f(x) = x for x >= 0., is applied to the data tensor elementwise. -This operator supports unidirectional broadcasting (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
  • slope (heterogeneous)T: Slope tensor. The shape of slope can be smaller than the first input X; if so, its shape must be unidirectionally broadcastable to X.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor (same size as X)

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxPRelu_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPRelu_9(*args, **kwargs)#
-

Version

-

Onnx name: PRelu

-

This version of the operator has been available since version 9.

-

Summary

-

PRelu takes input data (Tensor<T>) and slope tensor as input, and produces one -output data (Tensor<T>) where the function f(x) = slope * x for x < 0, -f(x) = x for x >= 0., is applied to the data tensor elementwise. -This operator supports unidirectional broadcasting (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
  • slope (heterogeneous)T: Slope tensor. The shape of slope can be smaller than the first input X; if so, its shape must be unidirectionally broadcastable to X.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor (same size as X)

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64): Constrain input and output types to float/int tensors.

  • -
-
- -
-
-
-
-

OnnxPad#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPad(*args, **kwargs)#
-

Version

-

Onnx name: Pad

-

This version of the operator has been available since version 13.

-

Summary

-

Given a tensor containing the data to be padded (data), a tensor containing the number of start and end pad values for each axis (pads), (optionally) a mode, and (optionally) constant_value, a padded tensor (output) is generated.

-

The three supported modes are (similar to corresponding modes supported by numpy.pad):

-
    -
  1. constant (default) - pads with a given constant value as specified by constant_value (which defaults to 0, empty string, or False)

  2. -
  3. reflect - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis

  4. -
  5. edge - pads with the edge values of array

  6. -
-
-
Example 1 (constant mode): Insert 0 pads to the beginning of the second dimension.

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]
pads = [0, 2, 0, 0]
mode = ‘constant’
constant_value = 0.0
output = [
    [0.0, 0.0, 1.0, 1.2],
    [0.0, 0.0, 2.3, 3.4],
    [0.0, 0.0, 4.5, 5.7],
]

Example 2 (reflect mode):

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]
pads = [0, 2, 0, 0]
mode = ‘reflect’
output = [
    [1.0, 1.2, 1.0, 1.2],
    [2.3, 3.4, 2.3, 3.4],
    [4.5, 5.7, 4.5, 5.7],
]

Example 3 (edge mode):

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]
pads = [0, 2, 0, 0]
mode = ‘edge’
output = [
    [1.0, 1.0, 1.0, 1.2],
    [2.3, 2.3, 2.3, 3.4],
    [4.5, 4.5, 4.5, 5.7],
]

-
-
-

Attributes

-
    -
  • mode: Supported modes: constant (default), reflect, edge. Default value is name: "mode" s: "constant" type: STRING

  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • data (heterogeneous)T: Input tensor.

  • -
  • pads (heterogeneous)tensor(int64): Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. pads should be a 1D tensor of shape [2 * input_rank]. pads format should be: [x1_begin, x2_begin,…,x1_end, x2_end,…], where xi_begin is the number of pad values added at the beginning of axis i and xi_end, the number of pad values added at the end of axis i.

  • -
  • constant_value (optional, heterogeneous)T: (Optional) A scalar value to be used if the mode chosen is constant (by default it is 0, empty string or False).

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor after padding.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
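Because the modes mirror numpy.pad, the three examples above can be reproduced directly with NumPy. Note the layout difference: the ONNX pads input is [x1_begin, x2_begin, ..., x1_end, x2_end], while numpy.pad takes per-axis (begin, end) pairs.

import numpy as np

data = np.array([[1.0, 1.2],
                 [2.3, 3.4],
                 [4.5, 5.7]], dtype=np.float32)

pads = [0, 2, 0, 0]                                   # two pad values before the second axis
pad_width = [(pads[0], pads[2]), (pads[1], pads[3])]  # numpy's per-axis (begin, end) pairs

print(np.pad(data, pad_width, mode='constant', constant_values=0.0))   # Example 1
print(np.pad(data, pad_width, mode='reflect'))                         # Example 2
print(np.pad(data, pad_width, mode='edge'))                            # Example 3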
- -
-
-
-
-

OnnxPad_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPad_1(*args, **kwargs)#
-

Version

-

Onnx name: Pad

-

This version of the operator has been available since version 1.

-

Summary

-

Given data tensor, paddings, mode, and value. Example:

Insert 0 paddings to the beginning of the second dimension.

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]
paddings = [0, 0, 2, 0]
output = [
    [
        [0.0, 0.0, 1.0, 1.2],
        [0.0, 0.0, 2.3, 3.4],
        [0.0, 0.0, 4.5, 5.7],
    ],
]

-
-

Attributes

-
    -
  • mode: Three modes: constant (default), reflect, edge. Default value is name: "mode" s: "constant" type: STRING

  • -
  • -
  • value: A single float indicating the value to be filled; the default is 0. Default value is name: "value" f: 0.0 type: FLOAT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Input tensor.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor after padding.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxPad_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPad_11(*args, **kwargs)#
-

Version

-

Onnx name: Pad

-

This version of the operator has been available since version 11.

-

Summary

-

Given a tensor containing the data to be padded (data), a tensor containing the number of start and end pad values for axis (pads), (optionally) a mode, and (optionally) constant_value, -a padded tensor (output) is generated.

-

The three supported modes are (similar to corresponding modes supported by numpy.pad):

-
    -
  1. constant`(default) - pads with a given constant value as specified by `constant_value (which defaults to 0)

  2. -
  3. reflect - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis

  4. -
  5. edge - pads with the edge values of array

  6. -
-
-
Example 1 (constant mode): Insert 0 pads to the beginning of the second dimension.

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]
pads = [0, 2, 0, 0]
mode = ‘constant’
constant_value = 0.0
output = [
    [0.0, 0.0, 1.0, 1.2],
    [0.0, 0.0, 2.3, 3.4],
    [0.0, 0.0, 4.5, 5.7],
]

Example 2 (reflect mode):

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]
pads = [0, 2, 0, 0]
mode = ‘reflect’
output = [
    [1.0, 1.2, 1.0, 1.2],
    [2.3, 3.4, 2.3, 3.4],
    [4.5, 5.7, 4.5, 5.7],
]

Example 3 (edge mode):

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]
pads = [0, 2, 0, 0]
mode = ‘edge’
output = [
    [1.0, 1.0, 1.0, 1.2],
    [2.3, 2.3, 2.3, 3.4],
    [4.5, 4.5, 4.5, 5.7],
]

-
-
-

Attributes

-
    -
  • mode: Supported modes: constant`(default), `reflect, edge Default value is -name: "mode" s: "constant" type: STRING

  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • data (heterogeneous)T: Input tensor.

  • -
  • pads (heterogeneous)tensor(int64): Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. pads should be a 1D tensor of shape [2 * input_rank]. pads format should be: [x1_begin, x2_begin,…,x1_end, x2_end,…], where xi_begin is the number of pad values added at the beginning of axis i and xi_end, the number of pad values added at the end of axis i.

  • -
  • constant_value (optional, heterogeneous)T: (Optional) A scalar value to be used if the mode chosen is constant (by default it is 0).

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor after padding.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input and output to only numeric types.

  • -
-
- -
-
-
-
-

OnnxPad_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPad_13(*args, **kwargs)#
-

Version

-

Onnx name: Pad

-

This version of the operator has been available since version 13.

-

Summary

-

Given a tensor containing the data to be padded (data), a tensor containing the number of start and end pad values for axis (pads), (optionally) a mode, and (optionally) constant_value, -a padded tensor (output) is generated.

-

The three supported modes are (similar to corresponding modes supported by numpy.pad):

-
    -
  1. constant`(default) - pads with a given constant value as specified by `constant_value (which defaults to 0, empty string, or False)

  2. -
  3. reflect - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis

  4. -
  5. edge - pads with the edge values of array

  6. -
-
-
Example 1 (constant mode): Insert 0 pads to the beginning of the second dimension.

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]
pads = [0, 2, 0, 0]
mode = ‘constant’
constant_value = 0.0
output = [
    [0.0, 0.0, 1.0, 1.2],
    [0.0, 0.0, 2.3, 3.4],
    [0.0, 0.0, 4.5, 5.7],
]

Example 2 (reflect mode):

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]
pads = [0, 2, 0, 0]
mode = ‘reflect’
output = [
    [1.0, 1.2, 1.0, 1.2],
    [2.3, 3.4, 2.3, 3.4],
    [4.5, 5.7, 4.5, 5.7],
]

Example 3 (edge mode):

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]
pads = [0, 2, 0, 0]
mode = ‘edge’
output = [
    [1.0, 1.0, 1.0, 1.2],
    [2.3, 2.3, 2.3, 3.4],
    [4.5, 4.5, 4.5, 5.7],
]

-
-
-

Attributes

-
    -
  • mode: Supported modes: constant`(default), `reflect, edge Default value is -name: "mode" s: "constant" type: STRING

  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • data (heterogeneous)T: Input tensor.

  • -
  • pads (heterogeneous)tensor(int64): Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. pads should be a 1D tensor of shape [2 * input_rank]. pads format should be: [x1_begin, x2_begin,…,x1_end, x2_end,…], where xi_begin is the number of pad values added at the beginning of axis i and xi_end, the number of pad values added at the end of axis i.

  • -
  • constant_value (optional, heterogeneous)T: (Optional) A scalar value to be used if the mode chosen is constant (by default it is 0, empty string or False).

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor after padding.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxPad_2#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPad_2(*args, **kwargs)#
-

Version

-

Onnx name: Pad

-

This version of the operator has been available since version 2.

-

Summary

-

Given data tensor, pads, mode, and value. Example:

Insert 0 pads to the beginning of the second dimension.

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]
pads = [0, 2, 0, 0]
output = [
    [
        [0.0, 0.0, 1.0, 1.2],
        [0.0, 0.0, 2.3, 3.4],
        [0.0, 0.0, 4.5, 5.7],
    ],
]

-
-

Attributes

-
    -
  • mode: Three modes: constant (default), reflect, edge. Default value is name: "mode" s: "constant" type: STRING

  • -
  • -
  • value: A single float indicating the value to be filled. Default value is name: "value" f: 0.0 type: FLOAT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Input tensor.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor after padding.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxPow#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPow(*args, **kwargs)#
-

Version

-

Onnx name: Pow

-

This version of the operator has been available since version 15.

-

Summary

-

Pow takes input data (Tensor<T>) and an exponent Tensor, and produces one output data (Tensor<T>) where the function f(x) = x^exponent is applied to the data tensor elementwise. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • X (heterogeneous)T: First operand, base of the exponent.

  • -
  • Y (heterogeneous)T1: Second operand, power of the exponent.

  • -
-

Outputs

-
    -
  • Z (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input X and output types to float/int tensors.

  • -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input Y types to float/int tensors.

  • -
-
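A NumPy illustration of the elementwise power with the multidirectional broadcasting described above:

import numpy as np

x = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]], dtype=np.float32)   # base
y = np.array([2.0, 0.5, 1.0], dtype=np.float32)     # exponent, broadcast across rows

print(np.power(x, y))
# [[ 1.         1.4142135  3.       ]
#  [16.         2.236068   6.       ]]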
- -
-
-
-
-

OnnxPow_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPow_1(*args, **kwargs)#
-

Version

-

Onnx name: Pow

-

This version of the operator has been available since version 1.

-

Summary

-

Pow takes input data (Tensor<T>) and an exponent Tensor, and produces one output data (Tensor<T>) where the function f(x) = x^exponent is applied to the data tensor elementwise.

-

If necessary, the right-hand-side argument will be broadcast to match the shape of the left-hand-side argument. When broadcasting is specified, the second tensor can either be of element size 1 (including a scalar tensor and any tensor with rank equal to or smaller than the first tensor), or have its shape be a contiguous subset of the first tensor’s shape. The start of the mutually equal shape is specified by the argument “axis”; if it is not set, suffix matching is assumed. 1-dim expansion doesn’t work yet.

-

For example, the following tensor shapes are supported (with broadcast=1):

-
-

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is a 1-element tensor
shape(A) = (2, 3, 4, 5), shape(B) = (5,)
shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

-
-

Attribute broadcast=1 needs to be passed to enable broadcasting.

-

Attributes

-
    -
  • -
  • broadcast: Pass 1 to enable broadcasting. Default value is name: "broadcast" i: 0 type: INT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor of any shape, base of the exponent.

  • -
  • Y (heterogeneous)T: Input tensor of any shape broadcastable to X shape, the exponent component.

  • -
-

Outputs

-
    -
  • Z (heterogeneous)T: Output tensor (same size as X)

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxPow_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPow_12(*args, **kwargs)#
-

Version

-

Onnx name: Pow

-

This version of the operator has been available since version 12.

-

Summary

-

Pow takes input data (Tensor<T>) and exponent Tensor, and -produces one output data (Tensor<T>) where the function f(x) = x^exponent, -is applied to the data tensor elementwise. -This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • X (heterogeneous)T: First operand, base of the exponent.

  • -
  • Y (heterogeneous)T1: Second operand, power of the exponent.

  • -
-

Outputs

-
    -
  • Z (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input X and output types to float/int tensors.

  • -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input Y types to float/int tensors.

  • -
-
- -
-
-
-
-

OnnxPow_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPow_13(*args, **kwargs)#
-

Version

-

Onnx name: Pow

-

This version of the operator has been available since -version 13.

-

Summary

-

Pow takes input data (Tensor<T>) and exponent Tensor, and -produces one output data (Tensor<T>) where the function f(x) = x^exponent, -is applied to the data tensor elementwise. -This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • X (heterogeneous)T: First operand, base of the exponent.

  • -
  • Y (heterogeneous)T1: Second operand, power of the exponent.

  • -
-

Outputs

-
    -
  • Z (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input X and output types to float/int tensors.

  • -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input Y types to float/int tensors.

  • -
-
- -
-
-
-
-

OnnxPow_15#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPow_15(*args, **kwargs)#
-

Version

-

Onnx name: Pow

-

This version of the operator has been available since -version 15.

-

Summary

-

Pow takes input data (Tensor<T>) and exponent Tensor, and -produces one output data (Tensor<T>) where the function f(x) = x^exponent, -is applied to the data tensor elementwise. -This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • X (heterogeneous)T: First operand, base of the exponent.

  • -
  • Y (heterogeneous)T1: Second operand, power of the exponent.

  • -
-

Outputs

-
    -
  • Z (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input X and output types to float/int tensors.

  • -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input Y types to float/int tensors.

  • -
-
- -
-
-
-
-

OnnxPow_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxPow_7(*args, **kwargs)#
-

Version

-

Onnx name: Pow

-

This version of the operator has been available since -version 7.

-

Summary

-

Pow takes input data (Tensor<T>) and exponent Tensor, and -produces one output data (Tensor<T>) where the function f(x) = x^exponent, -is applied to the data tensor elementwise. -This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • X (heterogeneous)T: First operand, base of the exponent.

  • -
  • Y (heterogeneous)T: Second operand, power of the exponent.

  • -
-

Outputs

-
    -
  • Z (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxQLinearConv#

class skl2onnx.algebra.onnx_ops.OnnxQLinearConv(*args, **kwargs)#

Version

-

Onnx name: QLinearConv

-

This version of the operator has been available since version 10.

-

Summary

-

The convolution operator consumes a quantized input tensor, its scale and zero point, a quantized filter, its scale and zero point, and output’s scale and zero point, and computes the quantized output. Each scale and zero-point pair must have same shape. It means they must be either scalars (per tensor) or 1-D tensors (per output channel). Each input or output and its related zero point must have same type. When bias is present it must be quantized using scale = input scale * weight scale and zero point as 0.

-

Attributes

  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is name: "auto_pad" s: "NOTSET" type: STRING

  • group: number of groups input channels and output channels are divided into. Default is 1. Default value is name: "group" i: 1 type: INT

Inputs

-

Between 8 and 9 inputs.

-
    -
  • x (heterogeneous)T1: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
  • x_scale (heterogeneous)tensor(float): Scale tensor for input ‘x’. It’s a scalar, which means a per-tensor/layer quantization.

  • -
  • x_zero_point (heterogeneous)T1: Zero point tensor for input ‘x’. It’s a scalar, which means a per-tensor/layer quantization.

  • -
  • w (heterogeneous)T2: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL.

  • -
  • w_scale (heterogeneous)tensor(float): Scale tensor for input ‘w’. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it’s a 1-D tensor, its number of elements should be equal to the number of output channels (M).

  • -
  • w_zero_point (heterogeneous)T2: Zero point tensor for input ‘w’. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it’s a 1-D tensor, its number of elements should be equal to the number of output channels (M).

  • -
  • y_scale (heterogeneous)tensor(float): Scale tensor for output ‘y’. It’s a scalar, which means a per-tensor/layer quantization.

  • -
  • y_zero_point (heterogeneous)T3: Zero point tensor for output ‘y’. It’s a scalar, which means a per-tensor/layer quantization.

  • -
  • B (optional, heterogeneous)T4: Optional 1D bias to be added to the convolution, has size of M. Bias must be quantized using scale = x_scale * w_scale and zero_point = 0

  • -
-

Outputs

-
    -
  • y (heterogeneous)T3: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

  • -
-

Type Constraints

-
    -
  • T1 tensor(int8), tensor(uint8): Constrain input type to 8-bit integer tensor.

  • -
  • T2 tensor(int8), tensor(uint8): Constrain filter type to 8-bit integer tensor.

  • -
  • T3 tensor(int8), tensor(uint8): Constrain output type to 8-bit integer tensor.

  • -
  • T4 tensor(int32): Constrain bias type to 32-bit integer tensor.

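A rough sketch of how this operator can be exercised through the algebra API, assuming numpy and a recent onnxruntime are installed: the float input X is quantized inside the graph with QuantizeLinear, convolved with a quantized 1x1 kernel, and dequantized back to float. All scales, zero points and weights below are arbitrary illustrative constants, not values taken from this documentation.

    import numpy as np
    from skl2onnx.algebra.onnx_ops import (
        OnnxQuantizeLinear, OnnxQLinearConv, OnnxDequantizeLinear)
    from onnxruntime import InferenceSession

    # Arbitrary per-tensor quantization parameters (scalar scale / zero point).
    x_scale = np.array(0.05, dtype=np.float32)
    x_zero = np.array(128, dtype=np.uint8)
    w = np.array([[[[150]]]], dtype=np.uint8)      # shape (M=1, C=1, kH=1, kW=1)
    w_scale = np.array(0.01, dtype=np.float32)
    w_zero = np.array(100, dtype=np.uint8)
    y_scale = np.array(0.05, dtype=np.float32)
    y_zero = np.array(128, dtype=np.uint8)

    # Quantize X, run the quantized convolution, then dequantize the result.
    qx = OnnxQuantizeLinear('X', x_scale, x_zero, op_version=13)
    qy = OnnxQLinearConv(qx, x_scale, x_zero, w, w_scale, w_zero,
                         y_scale, y_zero, op_version=10)
    out = OnnxDequantizeLinear(qy, y_scale, y_zero, op_version=13,
                               output_names=['Y'])

    x = np.random.rand(1, 1, 3, 3).astype(np.float32)
    onx = out.to_onnx({'X': x})
    print(InferenceSession(onx.SerializeToString()).run(None, {'X': x})[0])

The same quantize / quantized-op / dequantize pattern applies to OnnxQLinearMatMul below.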

OnnxQLinearConv_10#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxQLinearConv_10(*args, **kwargs)#
-

Version

-

Onnx name: QLinearConv

-

This version of the operator has been available since -version 10.

-

Summary

-

The convolution operator consumes a quantized input tensor, its scale and zero point, -a quantized filter, its scale and zero point, and output’s scale and zero point, -and computes the quantized output. Each scale and zero-point pair must have same shape. -It means they must be either scalars (per tensor) or 1-D tensors (per output channel). -Each input or output and its related zero point must have same type. -When bias is present it must be quantized using scale = input scale * weight scale and -zero point as 0.

-

Attributes

-
    -
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is -name: "auto_pad" s: "NOTSET" type: STRING

  • -
  • -
  • group: number of groups input channels and output channels are divided into. default is 1. Default value is -name: "group" i: 1 type: INT

  • -
  • -
  • -
  • -
-

Inputs

-

Between 8 and 9 inputs.

-
    -
  • x (heterogeneous)T1: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • -
  • x_scale (heterogeneous)tensor(float): Scale tensor for input ‘x’. It’s a scalar, which means a per-tensor/layer quantization.

  • -
  • x_zero_point (heterogeneous)T1: Zero point tensor for input ‘x’. It’s a scalar, which means a per-tensor/layer quantization.

  • -
  • w (heterogeneous)T2: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL.

  • -
  • w_scale (heterogeneous)tensor(float): Scale tensor for input ‘w’. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it’s a 1-D tensor, its number of elements should be equal to the number of output channels (M).

  • -
  • w_zero_point (heterogeneous)T2: Zero point tensor for input ‘w’. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it’s a 1-D tensor, its number of elements should be equal to the number of output channels (M).

  • -
  • y_scale (heterogeneous)tensor(float): Scale tensor for output ‘y’. It’s a scalar, which means a per-tensor/layer quantization.

  • -
  • y_zero_point (heterogeneous)T3: Zero point tensor for output ‘y’. It’s a scalar, which means a per-tensor/layer quantization.

  • -
  • B (optional, heterogeneous)T4: Optional 1D bias to be added to the convolution, has size of M. Bias must be quantized using scale = x_scale * w_scale and zero_point = 0

  • -
-

Outputs

-
    -
  • y (heterogeneous)T3: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

  • -
-

Type Constraints

-
    -
  • T1 tensor(int8), tensor(uint8): Constrain input type to 8-bit integer tensor.

  • -
  • T2 tensor(int8), tensor(uint8): Constrain filter type to 8-bit integer tensor.

  • -
  • T3 tensor(int8), tensor(uint8): Constrain output type to 8-bit integer tensor.

  • -
  • T4 tensor(int32): Constrain bias type to 32-bit integer tensor.

  • -
-
- -
-
-
-
-

OnnxQLinearMatMul#

class skl2onnx.algebra.onnx_ops.OnnxQLinearMatMul(*args, **kwargs)#

Version

-

Onnx name: QLinearMatMul

-

This version of the operator has been available since version 10.

-

Summary

-

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point). For (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. Scale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor (per row for ‘a’ and per column for ‘b’). Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization. If the input is 2D of shape [M, K] then zero point and scale tensor may be an M element vector [v_1, v_2, …, v_M] for per row quantization and K element vector of shape [v_1, v_2, …, v_K] for per column quantization. If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may have shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization. Production must never overflow, and accumulation may overflow if and only if in 32 bits.

-

Inputs

-
    -
  • a (heterogeneous)T1: N-dimensional quantized matrix a

  • -
  • a_scale (heterogeneous)tensor(float): scale of quantized input a

  • -
  • a_zero_point (heterogeneous)T1: zero point of quantized input a

  • -
  • b (heterogeneous)T2: N-dimensional quantized matrix b

  • -
  • b_scale (heterogeneous)tensor(float): scale of quantized input b

  • -
  • b_zero_point (heterogeneous)T2: zero point of quantized input b

  • -
  • y_scale (heterogeneous)tensor(float): scale of quantized output y

  • -
  • y_zero_point (heterogeneous)T3: zero point of quantized output y

  • -
-

Outputs

-
    -
  • y (heterogeneous)T3: Quantized matrix multiply results from a * b

  • -
-

Type Constraints

-
    -
  • T1 tensor(int8), tensor(uint8): Constrain input a and its zero point data type to 8-bit integer tensor.

  • -
  • T2 tensor(int8), tensor(uint8): Constrain input b and its zero point data type to 8-bit integer tensor.

  • -
  • T3 tensor(int8), tensor(uint8): Constrain output y and its zero point data type to 8-bit integer tensor.

  • -
-
- -
-
-
-
-

OnnxQLinearMatMul_10#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxQLinearMatMul_10(*args, **kwargs)#
-

Version

-

Onnx name: QLinearMatMul

-

This version of the operator has been available since -version 10.

-

Summary

-

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. -It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, -and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point). -For (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. -Scale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor -(per row for ‘a’ and per column for ‘b’). Scalar refers to per tensor quantization whereas N-D refers to per row -or per column quantization. If the input is 2D of shape [M, K] then zero point and scale tensor may be -an M element vector [v_1, v_2, …, v_M] for per row quantization and K element vector of shape [v_1, v_2, …, v_K] -for per column quantization. If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may -have shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization. -Production must never overflow, and accumulation may overflow if and only if in 32 bits.

-

Inputs

-
    -
  • a (heterogeneous)T1: N-dimensional quantized matrix a

  • -
  • a_scale (heterogeneous)tensor(float): scale of quantized input a

  • -
  • a_zero_point (heterogeneous)T1: zero point of quantized input a

  • -
  • b (heterogeneous)T2: N-dimensional quantized matrix b

  • -
  • b_scale (heterogeneous)tensor(float): scale of quantized input b

  • -
  • b_zero_point (heterogeneous)T2: zero point of quantized input b

  • -
  • y_scale (heterogeneous)tensor(float): scale of quantized output y

  • -
  • y_zero_point (heterogeneous)T3: zero point of quantized output y

  • -
-

Outputs

-
    -
  • y (heterogeneous)T3: Quantized matrix multiply results from a * b

  • -
-

Type Constraints

-
    -
  • T1 tensor(int8), tensor(uint8): Constrain input a and its zero point data type to 8-bit integer tensor.

  • -
  • T2 tensor(int8), tensor(uint8): Constrain input b and its zero point data type to 8-bit integer tensor.

  • -
  • T3 tensor(int8), tensor(uint8): Constrain output y and its zero point data type to 8-bit integer tensor.

  • -
-
- -
-
-
-
-

OnnxQuantizeLinear#

class skl2onnx.algebra.onnx_ops.OnnxQuantizeLinear(*args, **kwargs)#

Version

-

Onnx name: QuantizeLinear

-

This version of the operator has been available since version 13.

-

Summary

-

The linear quantization operator. It consumes a high precision tensor, a scale, and a zero point to compute the low precision / quantized tensor. The scale factor and zero point must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. The quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it’s uint8, or [-128, 127] if it’s int8. For (x / y_scale), it’s rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. ‘y_zero_point’ and ‘y’ must have same type.

-

Attributes

  • axis: (Optional) The axis of the quantization dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: 1 type: INT

Inputs

-

Between 2 and 3 inputs.

-
    -
  • x (heterogeneous)T1: N-D full precision Input tensor to be quantized.

  • -
  • y_scale (heterogeneous)tensor(float): Scale for doing quantization to get ‘y’. It can be a scalar, which means per-tensor/layer quantization, or a 1-D Tensor for per-axis quantization.

  • -
  • y_zero_point (optional, heterogeneous)T2: Zero point for doing quantization to get ‘y’. Shape must match y_scale. Default is uint8 with zero point of 0 if it’s not specified.

  • -
-

Outputs

-
    -
  • y (heterogeneous)T2: N-D quantized output tensor. It has same shape as input ‘x’.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(int32): Constrain ‘x’ to float or int32 tensor.

  • -
  • T2 tensor(int8), tensor(uint8): Constrain ‘y_zero_point’ and ‘y’ to 8-bit integer tensor.

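A small sketch, assuming numpy and onnxruntime are available; the scale and zero point are passed as constants so that only the float tensor remains a graph input:

    import numpy as np
    from skl2onnx.algebra.onnx_ops import OnnxQuantizeLinear
    from onnxruntime import InferenceSession

    # Per-tensor quantization to uint8: y = saturate(round(x / 0.1) + 128).
    scale = np.array(0.1, dtype=np.float32)
    zero_point = np.array(128, dtype=np.uint8)
    node = OnnxQuantizeLinear('X', scale, zero_point,
                              op_version=13, output_names=['Y'])

    x = np.array([[-1.0, 0.0, 1.0, 20.0]], dtype=np.float32)
    onx = node.to_onnx({'X': x})
    sess = InferenceSession(onx.SerializeToString())
    print(sess.run(None, {'X': x})[0])  # uint8 values, e.g. [[118 128 138 255]]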

OnnxQuantizeLinear_10#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxQuantizeLinear_10(*args, **kwargs)#
-

Version

-

Onnx name: QuantizeLinear

-

This version of the operator has been available since -version 10.

-

Summary

-

The linear per-tensor/layer quantization operator. It consumes a high precision tensor, a scale, a zero point to compute the low precision / quantized tensor. -The quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it’s uint8, or [-128, 127] if it’s int8. -For (x / y_scale), it’s rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. ‘y_zero_point’ and ‘y’ must have same type.

-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • x (heterogeneous)T1: N-D full precision Input tensor to be quantized.

  • -
  • y_scale (heterogeneous)tensor(float): Scale for doing quantization to get ‘y’. It’s a scalar, which means a per-tensor/layer quantization.

  • -
  • y_zero_point (optional, heterogeneous)T2: Zero point for doing quantization to get ‘y’. It’s a scalar, which means a per-tensor/layer quantization. Default value is uint8 typed 0 if it’s not specified.

  • -
-

Outputs

-
    -
  • y (heterogeneous)T2: N-D quantized output tensor. It has same shape as input ‘x’.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(int32): Constrain ‘x’ to float or int32 tensor.

  • -
  • T2 tensor(int8), tensor(uint8): Constrain ‘y_zero_point’ and ‘y’ to 8-bit integer tensor.

  • -
-
- -
-
-
-
-

OnnxQuantizeLinear_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxQuantizeLinear_13(*args, **kwargs)#
-

Version

-

Onnx name: QuantizeLinear

-

This version of the operator has been available since -version 13.

-

Summary

-

The linear quantization operator. It consumes a high precision tensor, a scale, and a zero point to compute the low precision / quantized tensor. -The scale factor and zero point must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. -The quantization formula is y = saturate ((x / y_scale) + y_zero_point). -For saturation, it saturates to [0, 255] if it’s uint8, or [-128, 127] if it’s int8. -For (x / y_scale), it’s rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. ‘y_zero_point’ and ‘y’ must have same type.

-

Attributes

-
    -
  • axis: (Optional) The axis of the quantization dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is -name: "axis" i: 1 type: INT

  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • x (heterogeneous)T1: N-D full precision Input tensor to be quantized.

  • -
  • y_scale (heterogeneous)tensor(float): Scale for doing quantization to get ‘y’. It can be a scalar, which means per-tensor/layer quantization, or a 1-D Tensor for per-axis quantization.

  • -
  • y_zero_point (optional, heterogeneous)T2: Zero point for doing quantization to get ‘y’. Shape must match y_scale. Default is uint8 with zero point of 0 if it’s not specified.

  • -
-

Outputs

-
    -
  • y (heterogeneous)T2: N-D quantized output tensor. It has same shape as input ‘x’.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(int32): Constrain ‘x’ to float or int32 tensor.

  • -
  • T2 tensor(int8), tensor(uint8): Constrain ‘y_zero_point’ and ‘y’ to 8-bit integer tensor.

  • -
-
- -
-
-
-
-

OnnxRNN#

class skl2onnx.algebra.onnx_ops.OnnxRNN(*args, **kwargs)#

Version

-

Onnx name: RNN

-

This version of the operator has been available since version 14.

-

Summary

-

Computes a one-layer simple RNN. This operator is usually supported via some custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

i - input gate

-

t - time step (t-1 means previous time step)

-

Wi - W parameter weight matrix for input gate

-

Ri - R recurrence weight matrix for input gate

-

Wbi - W parameter bias vector for input gate

-

Rbi - R parameter bias vector for input gate

-

WBi - W parameter weight matrix for backward input gate

-

RBi - R recurrence weight matrix for backward input gate

-

WBbi - WR bias vectors for backward input gate

-

RBbi - RR bias vectors for backward input gate

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Tanh):

-
-
    -
  • Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)

  • -
-
-

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-

Attributes

  • activations: One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default Tanh if not specified. Default value is name: "activations" strings: "Tanh" strings: "Tanh" type: STRINGS

  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is name: "direction" s: "forward" type: STRING

  • layout: The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size]. Default value is name: "layout" i: 0 type: INT

Inputs

-

Between 3 and 6 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for input gate. Concatenation of Wi and WBi (if bidirectional). The tensor has shape [num_directions, hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of Ri and RBi (if bidirectional). The tensor has shape [num_directions, hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wbi, Rbi] and [WBbi, RBbi] (if bidirectional). The tensor has shape [num_directions, 2*hidden_size]. Optional: If not specified - assumed to be 0.

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Outputs

-

Between 0 and 2 outputs.

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • -
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

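A minimal sketch of a forward, single-layer RNN built with this class, assuming numpy and onnxruntime are installed; W and R are random illustrative weights passed as initializers:

    import numpy as np
    from skl2onnx.algebra.onnx_ops import OnnxRNN
    from onnxruntime import InferenceSession

    seq_length, batch_size, input_size, hidden_size = 4, 1, 3, 2
    rng = np.random.RandomState(0)
    W = rng.randn(1, hidden_size, input_size).astype(np.float32)   # [num_directions, hidden, input]
    R = rng.randn(1, hidden_size, hidden_size).astype(np.float32)  # [num_directions, hidden, hidden]

    # Y is the whole sequence of hidden states, Y_h the last hidden state.
    node = OnnxRNN('X', W, R, hidden_size=hidden_size,
                   op_version=14, output_names=['Y', 'Y_h'])

    X = rng.randn(seq_length, batch_size, input_size).astype(np.float32)
    onx = node.to_onnx({'X': X})
    Y, Y_h = InferenceSession(onx.SerializeToString()).run(None, {'X': X})
    print(Y.shape, Y_h.shape)  # (4, 1, 1, 2) (1, 1, 2)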

OnnxRNN_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRNN_1(*args, **kwargs)#
-

Version

-

Onnx name: RNN

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes an one-layer simple RNN. This operator is usually supported -via some custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

i - input gate

-

t - time step (t-1 means previous time step)

-

Wi - W parameter weight matrix for input gate

-

Ri - R recurrence weight matrix for input gate

-

Wbi - W parameter bias vector for input gate

-

Rbi - R parameter bias vector for input gate

-

WBi - W parameter weight matrix for backward input gate

-

RBi - R recurrence weight matrix for backward input gate

-

WBbi - WR bias vectors for backward input gate

-

RBbi - RR bias vectors for backward input gate

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Tanh):

-
-
    -
  • Ht = f(Xt*(Wi^T) + Ht-1*Ri + Wbi + Rbi)

  • -
-
-

Attributes

-
    -
  • -
  • -
  • activations: One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default Tanh if not specified. Default value is -name: "activations" strings: "Tanh" strings: "Tanh" type: STRINGS

  • -
  • -
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is -name: "direction" s: "forward" type: STRING

  • -
  • -
  • output_sequence: The sequence output for the hidden is optional if 0. Default 0. Default value is -name: "output_sequence" i: 0 type: INT

  • -
-

Inputs

-

Between 3 and 6 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for input gate. Concatenation of Wi and WBi (if bidirectional). The tensor has shape [num_directions, hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of Ri and RBi (if bidirectional). The tensor has shape [num_directions, hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wbi, Rbi] and [WBbi, RBbi] (if bidirectional). The tensor has shape [num_directions, 2*hidden_size]. Optional: If not specified - assumed to be 0.

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Outputs

-

Between 0 and 2 outputs.

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size]. It is optional if output_sequence is 0.

  • -
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • -
-
- -
-
-
-
-

OnnxRNN_14#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRNN_14(*args, **kwargs)#
-

Version

-

Onnx name: RNN

-

This version of the operator has been available since -version 14.

-

Summary

-

Computes an one-layer simple RNN. This operator is usually supported -via some custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

i - input gate

-

t - time step (t-1 means previous time step)

-

Wi - W parameter weight matrix for input gate

-

Ri - R recurrence weight matrix for input gate

-

Wbi - W parameter bias vector for input gate

-

Rbi - R parameter bias vector for input gate

-

WBi - W parameter weight matrix for backward input gate

-

RBi - R recurrence weight matrix for backward input gate

-

WBbi - WR bias vectors for backward input gate

-

RBbi - RR bias vectors for backward input gate

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Tanh):

-
-
    -
  • Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)

  • -
-
-

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-

Attributes

-
    -
  • -
  • -
  • activations: One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default Tanh if not specified. Default value is -name: "activations" strings: "Tanh" strings: "Tanh" type: STRINGS

  • -
  • -
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is -name: "direction" s: "forward" type: STRING

  • -
  • -
  • layout: The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size]. Default value is -name: "layout" i: 0 type: INT

  • -
-

Inputs

-

Between 3 and 6 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for input gate. Concatenation of Wi and WBi (if bidirectional). The tensor has shape [num_directions, hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of Ri and RBi (if bidirectional). The tensor has shape [num_directions, hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wbi, Rbi] and [WBbi, RBbi] (if bidirectional). The tensor has shape [num_directions, 2*hidden_size]. Optional: If not specified - assumed to be 0.

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Outputs

-

Between 0 and 2 outputs.

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • -
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • -
-
- -
-
-
-
-

OnnxRNN_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRNN_7(*args, **kwargs)#
-

Version

-

Onnx name: RNN

-

This version of the operator has been available since -version 7.

-

Summary

-

Computes an one-layer simple RNN. This operator is usually supported -via some custom implementation such as CuDNN.

-

Notations:

-

X - input tensor

-

i - input gate

-

t - time step (t-1 means previous time step)

-

Wi - W parameter weight matrix for input gate

-

Ri - R recurrence weight matrix for input gate

-

Wbi - W parameter bias vector for input gate

-

Rbi - R parameter bias vector for input gate

-

WBi - W parameter weight matrix for backward input gate

-

RBi - R recurrence weight matrix for backward input gate

-

WBbi - WR bias vectors for backward input gate

-

RBbi - RR bias vectors for backward input gate

-

H - Hidden state

-

num_directions - 2 if direction == bidirectional else 1

-

Activation functions:

-
-

Relu(x) - max(0, x)

-

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

-

Sigmoid(x) - 1/(1 + e^{-x})

-

(NOTE: Below are optional)

-

Affine(x) - alpha*x + beta

-

LeakyRelu(x) - x if x >= 0 else alpha * x

-

ThresholdedRelu(x) - x if x >= alpha else 0

-

ScaledTanh(x) - alpha*Tanh(beta*x)

-

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

-

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

-

Softsign(x) - x/(1 + |x|)

-

Softplus(x) - log(1 + e^x)

-
-

Equations (Default: f=Tanh):

-
-
    -
  • Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)

  • -
-
-

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

-

Attributes

-
    -
  • -
  • -
  • activations: One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default Tanh if not specified. Default value is -name: "activations" strings: "Tanh" strings: "Tanh" type: STRINGS

  • -
  • -
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is -name: "direction" s: "forward" type: STRING

  • -
  • -
-

Inputs

-

Between 3 and 6 inputs.

-
    -
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • -
  • W (heterogeneous)T: The weight tensor for input gate. Concatenation of Wi and WBi (if bidirectional). The tensor has shape [num_directions, hidden_size, input_size].

  • -
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of Ri and RBi (if bidirectional). The tensor has shape [num_directions, hidden_size, hidden_size].

  • -
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wbi, Rbi] and [WBbi, RBbi] (if bidirectional). The tensor has shape [num_directions, 2*hidden_size]. Optional: If not specified - assumed to be 0.

  • -
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • -
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Outputs

-

Between 0 and 2 outputs.

-
    -
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • -
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • -
-
- -
-
-
-
-

OnnxRandomNormal#

class skl2onnx.algebra.onnx_ops.OnnxRandomNormal(*args, **kwargs)#

Version

-

Onnx name: RandomNormal

-

This version of the operator has been available since version 1.

-

Summary

-

Generate a tensor with random values drawn from a normal distribution. The shape of the tensor is specified by the shape argument and the parameters of the normal distribution are specified by mean and scale.

-

The data type is specified by the ‘dtype’ argument. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message.

-

Attributes

  • dtype: The data type for the elements of the output tensor. Default is TensorProto::FLOAT. Default value is name: "dtype" i: 1 type: INT

  • mean: The mean of the normal distribution. Default value is name: "mean" f: 0.0 type: FLOAT

  • scale: The standard deviation of the normal distribution. Default value is name: "scale" f: 1.0 type: FLOAT

Outputs

-
    -
  • output (heterogeneous)T: Output tensor of random values drawn from normal distribution

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

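RandomNormal has no inputs: the output shape comes from the shape attribute. The sketch below (numpy and onnxruntime assumed) therefore composes it with OnnxAdd so that the resulting graph still has a regular input and simply adds Gaussian noise to it:

    import numpy as np
    from skl2onnx.algebra.onnx_ops import OnnxRandomNormal, OnnxAdd
    from onnxruntime import InferenceSession

    # Noise tensor of shape (2, 3), mean 0, standard deviation 0.1.
    noise = OnnxRandomNormal(shape=[2, 3], mean=0.0, scale=0.1, op_version=1)
    node = OnnxAdd('X', noise, op_version=13, output_names=['Y'])

    x = np.zeros((2, 3), dtype=np.float32)
    onx = node.to_onnx({'X': x})
    print(InferenceSession(onx.SerializeToString()).run(None, {'X': x})[0])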

OnnxRandomNormalLike#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRandomNormalLike(*args, **kwargs)#
-

Version

-

Onnx name: RandomNormalLike

-

This version of the operator has been available since -version 1.

-

Summary

-

Generate a tensor with random values drawn from a normal distribution. -The shape of the output tensor is copied from the shape of the input tensor, -and the parameters of the normal distribution are specified by mean and scale.

-

The data type is specified by the ‘dtype’ argument, or copied from the input tensor if not provided. -The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the -TensorProto message, and be valid as an output type.

-

Attributes

-
    -
  • -
  • mean: The mean of the normal distribution. Default value is -name: "mean" f: 0.0 type: FLOAT

  • -
  • scale: The standard deviation of the normal distribution. Default value is -name: "scale" f: 1.0 type: FLOAT

  • -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor to copy shape and optionally type information from.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor of random values drawn from normal distribution

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.

  • -
  • T2 tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxRandomNormalLike_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRandomNormalLike_1(*args, **kwargs)#
-

Version

-

Onnx name: RandomNormalLike

-

This version of the operator has been available since -version 1.

-

Summary

-

Generate a tensor with random values drawn from a normal distribution. -The shape of the output tensor is copied from the shape of the input tensor, -and the parameters of the normal distribution are specified by mean and scale.

-

The data type is specified by the ‘dtype’ argument, or copied from the input tensor if not provided. -The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the -TensorProto message, and be valid as an output type.

-

Attributes

-
    -
  • -
  • mean: The mean of the normal distribution. Default value is -name: "mean" f: 0.0 type: FLOAT

  • -
  • scale: The standard deviation of the normal distribution. Default value is -name: "scale" f: 1.0 type: FLOAT

  • -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor to copy shape and optionally type information from.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor of random values drawn from normal distribution

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.

  • -
  • T2 tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxRandomNormal_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRandomNormal_1(*args, **kwargs)#
-

Version

-

Onnx name: RandomNormal

-

This version of the operator has been available since -version 1.

-

Summary

-

Generate a tensor with random values drawn from a normal distribution. The shape -of the tensor is specified by the shape argument and the parameter of the normal distribution -specified by mean and scale.

-

The data type is specified by the ‘dtype’ argument. The ‘dtype’ argument must -be one of the data types specified in the ‘DataType’ enum field in the -TensorProto message.

-

Attributes

-
    -
  • dtype: The data type for the elements of the output tensor. Default is TensorProto::FLOAT. Default value is -name: "dtype" i: 1 type: INT

  • -
  • mean: The mean of the normal distribution. Default value is -name: "mean" f: 0.0 type: FLOAT

  • -
  • scale: The standard deviation of the normal distribution. Default value is -name: "scale" f: 1.0 type: FLOAT

  • -
  • -
  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor of random values drawn from normal distribution

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxRandomUniform#

class skl2onnx.algebra.onnx_ops.OnnxRandomUniform(*args, **kwargs)#

Version

-

Onnx name: RandomUniform

-

This version of the operator has been available since version 1.

-

Summary

-

Generate a tensor with random values drawn from a uniform distribution. The shape of the tensor is specified by the shape argument and the range by low and high.

-

The data type is specified by the ‘dtype’ argument. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message.

-

Attributes

  • dtype: The data type for the elements of the output tensor. If not specified, default is TensorProto::FLOAT. Default value is name: "dtype" i: 1 type: INT

  • high: Upper boundary of the output values. Default value is name: "high" f: 1.0 type: FLOAT

  • low: Lower boundary of the output values. Default value is name: "low" f: 0.0 type: FLOAT

Outputs

-
    -
  • output (heterogeneous)T: Output tensor of random values drawn from uniform distribution

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

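RandomUniform has no inputs either, so the same composition trick applies. A short sketch (numpy and onnxruntime assumed) rescaling an input by noise drawn uniformly from [0.9, 1.1]:

    import numpy as np
    from skl2onnx.algebra.onnx_ops import OnnxRandomUniform, OnnxMul
    from onnxruntime import InferenceSession

    # Multiplicative noise of shape (2, 3), uniform in [0.9, 1.1).
    noise = OnnxRandomUniform(shape=[2, 3], low=0.9, high=1.1, op_version=1)
    node = OnnxMul('X', noise, op_version=13, output_names=['Y'])

    x = np.ones((2, 3), dtype=np.float32)
    onx = node.to_onnx({'X': x})
    print(InferenceSession(onx.SerializeToString()).run(None, {'X': x})[0])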

OnnxRandomUniformLike#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRandomUniformLike(*args, **kwargs)#
-

Version

-

Onnx name: RandomUniformLike

-

This version of the operator has been available since -version 1.

-

Summary

-

Generate a tensor with random values drawn from a uniform distribution. -The shape of the output tensor is copied from the shape of the input tensor, -and the parameters of the uniform distribution are specified by low and high.

-

The data type is specified by the ‘dtype’ argument, or copied from the input tensor if not provided. -The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the -TensorProto message and be valid as an output type.

-

Attributes

-
    -
  • -
  • high: Upper boundary of the output values. Default value is -name: "high" f: 1.0 type: FLOAT

  • -
  • low: Lower boundary of the output values. Default value is -name: "low" f: 0.0 type: FLOAT

  • -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor to copy shape and optionally type information from.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor of random values drawn from uniform distribution

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.

  • -
  • T2 tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxRandomUniformLike_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRandomUniformLike_1(*args, **kwargs)#
-

Version

-

Onnx name: RandomUniformLike

-

This version of the operator has been available since -version 1.

-

Summary

-

Generate a tensor with random values drawn from a uniform distribution. -The shape of the output tensor is copied from the shape of the input tensor, -and the parameters of the uniform distribution are specified by low and high.

-

The data type is specified by the ‘dtype’ argument, or copied from the input tensor if not provided. -The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the -TensorProto message and be valid as an output type.

-

Attributes

-
    -
  • -
  • high: Upper boundary of the output values. Default value is -name: "high" f: 1.0 type: FLOAT

  • -
  • low: Lower boundary of the output values. Default value is -name: "low" f: 0.0 type: FLOAT

  • -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T1: Input tensor to copy shape and optionally type information from.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T2: Output tensor of random values drawn from uniform distribution

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.

  • -
  • T2 tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxRandomUniform_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRandomUniform_1(*args, **kwargs)#
-

Version

-

Onnx name: RandomUniform

-

This version of the operator has been available since -version 1.

-

Summary

-

Generate a tensor with random values drawn from a uniform distribution. The shape -of the tensor is specified by the shape argument and the range by low and high.

-

The data type is specified by the ‘dtype’ argument. The ‘dtype’ argument must -be one of the data types specified in the ‘DataType’ enum field in the -TensorProto message.

-

Attributes

-
    -
  • dtype: The data type for the elements of the output tensor. If not specified, default is TensorProto::FLOAT. Default value is -name: "dtype" i: 1 type: INT

  • -
  • high: Upper boundary of the output values. Default value is -name: "high" f: 1.0 type: FLOAT

  • -
  • low: Lower boundary of the output values. Default value is -name: "low" f: 0.0 type: FLOAT

  • -
  • -
  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor of random values drawn from uniform distribution

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxRange#

class skl2onnx.algebra.onnx_ops.OnnxRange(*args, **kwargs)#

Version

-

Onnx name: Range

-

This version of the operator has been available since version 11.

-

Summary

-

Generate a tensor containing a sequence of numbers that begin at start and extends by increments of delta up to limit (exclusive).

-

The number of elements in the output of range is computed as below:

number_of_elements = max( ceil( (limit - start) / delta ) , 0 )

The pseudocode determining the contents of the output is shown below:

for(int i=0; i<number_of_elements; ++i) {
    output[i] = start + (i * delta);
}

Example 1: Inputs: start = 3, limit = 9, delta = 3. Output: [3, 6].

Example 2: Inputs: start = 10, limit = 4, delta = -2. Output: [10, 8, 6].

-

Inputs

-
    -
  • start (heterogeneous)T: Scalar. First entry for the range of output values.

  • -
  • limit (heterogeneous)T: Scalar. Exclusive upper limit for the range of output values.

  • -
  • delta (heterogeneous)T: Scalar. Value to step by.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: A 1-D tensor with same type as the inputs containing generated range of values.

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(double), tensor(int16), tensor(int32), tensor(int64): Constrain input types to common numeric type tensors.

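A brief sketch (numpy and onnxruntime assumed): start, limit and delta are passed here as scalar constants and the generated range is added to a regular float input, purely for illustration:

    import numpy as np
    from skl2onnx.algebra.onnx_ops import OnnxRange, OnnxAdd
    from onnxruntime import InferenceSession

    # Range(0, 4, 1) -> [0, 1, 2, 3]
    steps = OnnxRange(np.array(0.0, dtype=np.float32),
                      np.array(4.0, dtype=np.float32),
                      np.array(1.0, dtype=np.float32), op_version=11)
    node = OnnxAdd('X', steps, op_version=13, output_names=['Y'])

    x = np.full((4,), 10.0, dtype=np.float32)
    onx = node.to_onnx({'X': x})
    print(InferenceSession(onx.SerializeToString()).run(None, {'X': x})[0])  # [10. 11. 12. 13.]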

OnnxRange_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRange_11(*args, **kwargs)#
-

Version

-

Onnx name: Range

-

This version of the operator has been available since -version 11.

-

Summary

-

Generate a tensor containing a sequence of numbers that begin at start and extends by increments of delta -up to limit (exclusive).

-

The number of elements in the output of range is computed as below-

-

number_of_elements = max( ceil( (limit - start) / delta ) , 0 )

-

The pseudocode determining the contents of the output is shown below-

-

for(int i=0; i<number_of_elements; ++i)

-

{

-

` output[i] = start + (i * delta); `

-

}

-

Example 1 -Inputs: start = 3, limit = 9, delta = 3 -Output: [3, 6]

-

Example 2 -Inputs: start = 10, limit = 4, delta = -2 -Output: [10, 8, 6]

-

Inputs

-
    -
  • start (heterogeneous)T: Scalar. First entry for the range of output values.

  • -
  • limit (heterogeneous)T: Scalar. Exclusive upper limit for the range of output values.

  • -
  • delta (heterogeneous)T: Scalar. Value to step by.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: A 1-D tensor with same type as the inputs containing generated range of values.

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(double), tensor(int16), tensor(int32), tensor(int64): Constrain input types to common numeric type tensors.

  • -
-
- -
-
-
-
-

OnnxReciprocal#

class skl2onnx.algebra.onnx_ops.OnnxReciprocal(*args, **kwargs)#

Version

-

Onnx name: Reciprocal

-

This version of the operator has been available since version 13.

-

Summary

-

Reciprocal takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the reciprocal, y = 1/x, is applied to the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-
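The elementwise behaviour described above can be reproduced with NumPy for a quick sanity check; this is only a sketch of the semantics, not how the runtime implements the operator.

import numpy as np

x = np.array([1.0, 2.0, 4.0, -0.5], dtype=np.float32)
y = np.reciprocal(x)  # elementwise y = 1/x, matching the Reciprocal summary
print(y)              # [ 1.    0.5   0.25 -2.  ]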

OnnxReciprocal_1#

class skl2onnx.algebra.onnx_ops.OnnxReciprocal_1(*args, **kwargs)#

Version

Onnx name: Reciprocal

This version of the operator has been available since version 1.

Summary

Reciprocal takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the reciprocal, y = 1/x, is applied to the tensor elementwise.

Attributes

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxReciprocal_13#

class skl2onnx.algebra.onnx_ops.OnnxReciprocal_13(*args, **kwargs)#

Version

Onnx name: Reciprocal

This version of the operator has been available since version 13.

Summary

Reciprocal takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the reciprocal, y = 1/x, is applied to the tensor elementwise.

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

OnnxReciprocal_6#

class skl2onnx.algebra.onnx_ops.OnnxReciprocal_6(*args, **kwargs)#

Version

Onnx name: Reciprocal

This version of the operator has been available since version 6.

Summary

Reciprocal takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the reciprocal, y = 1/x, is applied to the tensor elementwise.

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxReduceL1#

class skl2onnx.algebra.onnx_ops.OnnxReduceL1(*args, **kwargs)#

Version

Onnx name: ReduceL1

This version of the operator has been available since version 13.

Summary

Computes the L1 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, the reduced dimensions are pruned from the resulting tensor.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not; the default of 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs

  • data (heterogeneous)T: An input tensor.

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.
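A short NumPy sketch of the keepdims behaviour described in the Summary: keepdims=1 (the operator's default) preserves the rank, while keepdims=0 prunes the reduced dimension.

import numpy as np

data = np.array([[1.0, -2.0], [3.0, -4.0]], dtype=np.float32)

# L1 norm along axis 1 with keepdims=1: rank preserved
kept = np.sum(np.abs(data), axis=1, keepdims=True)     # shape (2, 1)

# keepdims=0: the reduced dimension is pruned
pruned = np.sum(np.abs(data), axis=1, keepdims=False)  # shape (2,)

print(kept.ravel(), pruned)  # [3. 7.] [3. 7.]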

OnnxReduceL1_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceL1_1(*args, **kwargs)#
-

Version

-

Onnx name: ReduceL1

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes the L1 norm of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceL1_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceL1_11(*args, **kwargs)#
-

Version

-

Onnx name: ReduceL1

-

This version of the operator has been available since -version 11.

-

Summary

-

Computes the L1 norm of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceL1_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceL1_13(*args, **kwargs)#
-

Version

-

Onnx name: ReduceL1

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the L1 norm of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceL2#

class skl2onnx.algebra.onnx_ops.OnnxReduceL2(*args, **kwargs)#

Version

Onnx name: ReduceL2

This version of the operator has been available since version 13.

Summary

Computes the L2 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, the reduced dimensions are pruned from the resulting tensor.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not; the default of 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs

  • data (heterogeneous)T: An input tensor.

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.
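For reference, the L2 reduction described above corresponds to the following NumPy computation; this is a sketch of the math only, not the runtime implementation.

import numpy as np

data = np.array([[3.0, 4.0], [6.0, 8.0]], dtype=np.float32)
# L2 norm along axis 1, with the operator's default keepdims=1
reduced = np.sqrt(np.sum(np.square(data), axis=1, keepdims=True))
print(reduced.ravel())  # [ 5. 10.]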

OnnxReduceL2_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceL2_1(*args, **kwargs)#
-

Version

-

Onnx name: ReduceL2

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes the L2 norm of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceL2_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceL2_11(*args, **kwargs)#
-

Version

-

Onnx name: ReduceL2

-

This version of the operator has been available since -version 11.

-

Summary

-

Computes the L2 norm of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceL2_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceL2_13(*args, **kwargs)#
-

Version

-

Onnx name: ReduceL2

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the L2 norm of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceLogSum#

class skl2onnx.algebra.onnx_ops.OnnxReduceLogSum(*args, **kwargs)#

Version

Onnx name: ReduceLogSum

This version of the operator has been available since version 13.

Summary

Computes the log sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, the reduced dimensions are pruned from the resulting tensor.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not; the default of 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs

  • data (heterogeneous)T: An input tensor.

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.
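The log-sum reduction above is simply the logarithm of a sum over the reduced axes; a small NumPy sketch:

import numpy as np

data = np.array([[1.0, np.e - 1.0]], dtype=np.float32)
# log of the sum along axis 1, keepdims=1 by default
reduced = np.log(np.sum(data, axis=1, keepdims=True))
print(reduced)  # approximately [[1.]]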

OnnxReduceLogSumExp#

class skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp(*args, **kwargs)#

Version

Onnx name: ReduceLogSumExp

This version of the operator has been available since version 13.

Summary

Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, the reduced dimensions are pruned from the resulting tensor.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not; the default of 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs

  • data (heterogeneous)T: An input tensor.

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.
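The log-sum-exp reduction is usually computed in a numerically stable form by subtracting the per-axis maximum first. The NumPy sketch below illustrates why; it shows the math only and does not claim to describe how any particular runtime implements the operator.

import numpy as np

data = np.array([[1000.0, 1000.0]])

# naive log(sum(exp(x))) overflows for large inputs (emits an overflow warning)
naive = np.log(np.sum(np.exp(data), axis=1, keepdims=True))           # [[inf]]

# stable rewrite: max + log(sum(exp(x - max)))
m = np.max(data, axis=1, keepdims=True)
stable = m + np.log(np.sum(np.exp(data - m), axis=1, keepdims=True))  # [[1000.6931...]]
print(naive, stable)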

OnnxReduceLogSumExp_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp_1(*args, **kwargs)#
-

Version

-

Onnx name: ReduceLogSumExp

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes the log sum exponent of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceLogSumExp_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp_11(*args, **kwargs)#
-

Version

-

Onnx name: ReduceLogSumExp

-

This version of the operator has been available since -version 11.

-

Summary

-

Computes the log sum exponent of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceLogSumExp_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp_13(*args, **kwargs)#
-

Version

-

Onnx name: ReduceLogSumExp

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the log sum exponent of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceLogSum_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceLogSum_1(*args, **kwargs)#
-

Version

-

Onnx name: ReduceLogSum

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes the log sum of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceLogSum_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceLogSum_11(*args, **kwargs)#
-

Version

-

Onnx name: ReduceLogSum

-

This version of the operator has been available since -version 11.

-

Summary

-

Computes the log sum of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceLogSum_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceLogSum_13(*args, **kwargs)#
-

Version

-

Onnx name: ReduceLogSum

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the log sum of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceMax#

class skl2onnx.algebra.onnx_ops.OnnxReduceMax(*args, **kwargs)#

Version

Onnx name: ReduceMax

This version of the operator has been available since version 13.

Summary

Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, the reduced dimensions are pruned from the resulting tensor.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not; the default of 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs

  • data (heterogeneous)T: An input tensor.

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

OnnxReduceMax_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceMax_1(*args, **kwargs)#
-

Version

-

Onnx name: ReduceMax

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes the max of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceMax_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceMax_11(*args, **kwargs)#
-

Version

-

Onnx name: ReduceMax

-

This version of the operator has been available since -version 11.

-

Summary

-

Computes the max of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceMax_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceMax_12(*args, **kwargs)#
-

Version

-

Onnx name: ReduceMax

-

This version of the operator has been available since -version 12.

-

Summary

-

Computes the max of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceMax_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceMax_13(*args, **kwargs)#
-

Version

-

Onnx name: ReduceMax

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the max of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceMean#

class skl2onnx.algebra.onnx_ops.OnnxReduceMean(*args, **kwargs)#

Version

Onnx name: ReduceMean

This version of the operator has been available since version 13.

Summary

Computes the mean of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, the reduced dimensions are pruned from the resulting tensor.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not; the default of 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs

  • data (heterogeneous)T: An input tensor.

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.
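The note about numpy's keepdims default applies to every reducer in this family; the difference is easy to see with a mean reduction in NumPy:

import numpy as np

data = np.arange(6, dtype=np.float32).reshape(2, 3)

# ONNX ReduceMean defaults to keepdims=1, so the rank is preserved
onnx_like = np.mean(data, axis=0, keepdims=True)   # shape (1, 3)

# numpy defaults to keepdims=False, so the reduced axis disappears
numpy_default = np.mean(data, axis=0)              # shape (3,)

print(onnx_like.shape, numpy_default.shape)        # (1, 3) (3,)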

OnnxReduceMean_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceMean_1(*args, **kwargs)#
-

Version

-

Onnx name: ReduceMean

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes the mean of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceMean_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceMean_11(*args, **kwargs)#
-

Version

-

Onnx name: ReduceMean

-

This version of the operator has been available since -version 11.

-

Summary

-

Computes the mean of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceMean_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceMean_13(*args, **kwargs)#
-

Version

-

Onnx name: ReduceMean

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the mean of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceMin#

class skl2onnx.algebra.onnx_ops.OnnxReduceMin(*args, **kwargs)#

Version

Onnx name: ReduceMin

This version of the operator has been available since version 13.

Summary

Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, the reduced dimensions are pruned from the resulting tensor.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not; the default of 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs

  • data (heterogeneous)T: An input tensor.

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

OnnxReduceMin_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceMin_1(*args, **kwargs)#
-

Version

-

Onnx name: ReduceMin

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes the min of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceMin_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceMin_11(*args, **kwargs)#
-

Version

-

Onnx name: ReduceMin

-

This version of the operator has been available since -version 11.

-

Summary

-

Computes the min of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceMin_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceMin_12(*args, **kwargs)#
-

Version

-

Onnx name: ReduceMin

-

This version of the operator has been available since -version 12.

-

Summary

-

Computes the min of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceMin_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceMin_13(*args, **kwargs)#
-

Version

-

Onnx name: ReduceMin

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the min of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceProd#

class skl2onnx.algebra.onnx_ops.OnnxReduceProd(*args, **kwargs)#

Version

Onnx name: ReduceProd

This version of the operator has been available since version 13.

Summary

Computes the product of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, the reduced dimensions are pruned from the resulting tensor.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not; the default of 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs

  • data (heterogeneous)T: An input tensor.

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxReduceProd_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceProd_1(*args, **kwargs)#
-

Version

-

Onnx name: ReduceProd

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes the product of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceProd_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceProd_11(*args, **kwargs)#
-

Version

-

Onnx name: ReduceProd

-

This version of the operator has been available since -version 11.

-

Summary

-

Computes the product of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceProd_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceProd_13(*args, **kwargs)#
-

Version

-

Onnx name: ReduceProd

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the product of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceSum#

class skl2onnx.algebra.onnx_ops.OnnxReduceSum(*args, **kwargs)#

Version

Onnx name: ReduceSum

This version of the operator has been available since version 13.

Summary

Computes the sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, the reduced dimensions are pruned from the resulting tensor.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not; the default of 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

  • noop_with_empty_axes: Defines the behaviour if 'axes' is empty. The default behaviour with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor is not reduced and the output tensor is equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs

Between 1 and 2 inputs.

  • data (heterogeneous)T: An input tensor.

  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.
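Unlike most of the reducers above, ReduceSum (since opset 13) takes axes as an optional second input rather than an attribute. A NumPy sketch of the two cases described in the Inputs section:

import numpy as np

data = np.arange(6, dtype=np.float32).reshape(2, 3)
axes = np.array([1], dtype=np.int64)   # 'axes' is an input tensor, not an attribute

# reduce along the given axes, keepdims=1 by default
reduced = np.sum(data, axis=tuple(axes.tolist()), keepdims=True)
print(reduced)                         # [[ 3.] [12.]]

# no 'axes' and noop_with_empty_axes=0 (default): reduce over all dimensions
print(np.sum(data, keepdims=True))     # [[15.]]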

OnnxReduceSumSquare#

class skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare(*args, **kwargs)#

Version

Onnx name: ReduceSumSquare

This version of the operator has been available since version 13.

Summary

Computes the sum square of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, the reduced dimensions are pruned from the resulting tensor.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not; the default of 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs

  • data (heterogeneous)T: An input tensor.

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxReduceSumSquare_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare_1(*args, **kwargs)#
-

Version

-

Onnx name: ReduceSumSquare

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes the sum square of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceSumSquare_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare_11(*args, **kwargs)#
-

Version

-

Onnx name: ReduceSumSquare

-

This version of the operator has been available since -version 11.

-

Summary

-

Computes the sum square of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceSumSquare_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare_13(*args, **kwargs)#
-

Version

-

Onnx name: ReduceSumSquare

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the sum square of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceSum_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceSum_1(*args, **kwargs)#
-

Version

-

Onnx name: ReduceSum

-

This version of the operator has been available since -version 1.

-

Summary

-

Computes the sum of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceSum_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceSum_11(*args, **kwargs)#
-

Version

-

Onnx name: ReduceSum

-

This version of the operator has been available since -version 11.

-

Summary

-

Computes the sum of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxReduceSum_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReduceSum_13(*args, **kwargs)#
-

Version

-

Onnx name: ReduceSum

-

This version of the operator has been available since -version 13.

-

Summary

-

Computes the sum of the input tensor’s element along the provided axes. The resulted -tensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned.

-

The above behavior is similar to numpy, with the exception that numpy default keepdims to -False instead of True.

-

Attributes

-
    -
  • keepdims: Keep the reduced dimension or not, default 1 mean keep reduced dimension. Default value is -name: "keepdims" i: 1 type: INT

  • -
  • noop_with_empty_axes: Defines behaviour if ‘axes’ is empty. Default behaviour with ‘false’ is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor. Default value is -name: "noop_with_empty_axes" i: 0 type: INT

  • -
-

Inputs

-

Between 1 and 2 inputs.

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if ‘noop_with_empty_axes’ is false, else act as an Identity op when ‘noop_with_empty_axes’ is true. Accepted range is [-r, r-1] where r = rank(data).

  • -
-

Outputs

-
    -
  • reduced (heterogeneous)T: Reduced output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxRelu#

class skl2onnx.algebra.onnx_ops.OnnxRelu(*args, **kwargs)#

Version

Onnx name: Relu

This version of the operator has been available since version 14.

Summary

Relu takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the rectified linear function, y = max(0, x), is applied to the tensor elementwise.

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(float), tensor(int32), tensor(int8), tensor(int16), tensor(int64), tensor(float16), tensor(double), tensor(bfloat16): Constrain input and output types to signed numeric tensors.
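Relu is a convenient operator to show how these algebra classes are typically used: the sketch below builds a one-node model and checks it against the y = max(0, x) definition. It follows the usage pattern of the algebra API shown in this documentation's examples; exact keyword support (op_version, output_names) may vary with the installed skl2onnx and onnxruntime versions.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxRelu
import onnxruntime as rt

x = np.array([[-1.0, 2.0], [0.5, -3.0]], dtype=np.float32)

# build a one-node graph Y = Relu(X)
node = OnnxRelu('X', op_version=14, output_names=['Y'])
onx = node.to_onnx({'X': x})

sess = rt.InferenceSession(onx.SerializeToString(), providers=['CPUExecutionProvider'])
got = sess.run(None, {'X': x})[0]

# the operator computes y = max(0, x) elementwise
assert np.allclose(got, np.maximum(0, x))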

OnnxRelu_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRelu_1(*args, **kwargs)#
-

Version

-

Onnx name: Relu

-

This version of the operator has been available since -version 1.

-

Summary

-

Relu takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the rectified linear function, y = max(0, x), is applied to -the tensor elementwise.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxRelu_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRelu_13(*args, **kwargs)#
-

Version

-

Onnx name: Relu

-

This version of the operator has been available since -version 13.

-

Summary

-

Relu takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the rectified linear function, y = max(0, x), is applied to -the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxRelu_14#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRelu_14(*args, **kwargs)#
-

Version

-

Onnx name: Relu

-

This version of the operator has been available since -version 14.

-

Summary

-

Relu takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the rectified linear function, y = max(0, x), is applied to -the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(int32), tensor(int8), tensor(int16), tensor(int64), tensor(float16), tensor(double), tensor(bfloat16): Constrain input and output types to signed numeric tensors.

  • -
-
- -
-
-
-
-

OnnxRelu_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxRelu_6(*args, **kwargs)#
-

Version

-

Onnx name: Relu

-

This version of the operator has been available since -version 6.

-

Summary

-

Relu takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the rectified linear function, y = max(0, x), is applied to -the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxReshape#

class skl2onnx.algebra.onnx_ops.OnnxReshape(*args, **kwargs)#

Version

Onnx name: Reshape

This version of the operator has been available since version 14.

Summary

Reshape the input tensor similar to numpy.reshape. The first input is the data tensor; the second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. At most one dimension of the new shape can be -1. In this case, the value is inferred from the size of the tensor and the remaining dimensions. A dimension could also be 0, in which case the actual dimension value is unchanged (i.e. taken from the input tensor). If 'allowzero' is set, and the new shape includes 0, the dimension will be set explicitly to zero (i.e. not taken from the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. The input tensor's shape and the output tensor's shape are required to have the same number of elements.

Attributes

  • allowzero: (Optional) By default, when any value in the 'shape' input is equal to zero, the corresponding dimension value is copied from the input tensor dynamically. allowzero=1 indicates that if any value in the 'shape' input is set to zero, the zero value is honored, similar to NumPy. Default value is name: "allowzero" i: 0 type: INT

Inputs

  • data (heterogeneous)T: An input tensor.

  • shape (heterogeneous)tensor(int64): Specified shape for output.

Outputs

  • reshaped (heterogeneous)T: Reshaped data.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.
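The 0 and -1 conventions described above differ slightly from numpy: with allowzero unset, a 0 copies the corresponding input dimension. The hypothetical helper below sketches that behaviour for quick experimentation; it is ours for illustration and not part of skl2onnx.

import numpy as np

def onnx_reshape(data, shape, allowzero=0):
    shape = list(shape)
    if allowzero == 0:
        # a 0 copies the corresponding dimension from the input tensor
        shape = [data.shape[i] if s == 0 else s for i, s in enumerate(shape)]
    return data.reshape(shape)  # numpy resolves at most one -1 by itself

data = np.zeros((2, 3, 4), dtype=np.float32)
print(onnx_reshape(data, [0, -1]).shape)    # (2, 12)
print(onnx_reshape(data, [0, 0, 4]).shape)  # (2, 3, 4)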

OnnxReshape_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxReshape_1(*args, **kwargs)#
-

Version

-

Onnx name: Reshape

-

This version of the operator has been available since -version 1.

-

Summary

-

Reshape the input tensor similar to numpy.reshape. -It takes a tensor as input and an argument shape. It outputs the reshaped tensor. -At most one dimension of the new shape can be -1. In this case, the value is -inferred from the size of the tensor and the remaining dimensions. A dimension -could also be 0, in which case the actual dimension value is unchanged (i.e. taken -from the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. -The input tensor’s shape and the output tensor’s shape are required to have the same number of elements.

-

Attributes

-
    -
  • -
  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • reshaped (heterogeneous)T: Reshaped data.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxReshape_13#

class skl2onnx.algebra.onnx_ops.OnnxReshape_13(*args, **kwargs)#

Version

Onnx name: Reshape

This version of the operator has been available since version 13.

Summary

Reshape the input tensor similar to numpy.reshape. First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. At most one dimension of the new shape can be -1. In this case, the value is inferred from the size of the tensor and the remaining dimensions. A dimension could also be 0, in which case the actual dimension value is unchanged (i.e. taken from the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. The input tensor’s shape and the output tensor’s shape are required to have the same number of elements.

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
  • shape (heterogeneous)tensor(int64): Specified shape for output.

  • -
-

Outputs

-
    -
  • reshaped (heterogeneous)T: Reshaped data.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxReshape_14#

class skl2onnx.algebra.onnx_ops.OnnxReshape_14(*args, **kwargs)#

Version

Onnx name: Reshape

This version of the operator has been available since version 14.

Summary

Reshape the input tensor similar to numpy.reshape. First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. At most one dimension of the new shape can be -1. In this case, the value is inferred from the size of the tensor and the remaining dimensions. A dimension could also be 0, in which case the actual dimension value is unchanged (i.e. taken from the input tensor). If ‘allowzero’ is set, and the new shape includes 0, the dimension will be set explicitly to zero (i.e. not taken from input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. The input tensor’s shape and the output tensor’s shape are required to have the same number of elements.

Attributes

-
    -
  • allowzero: (Optional) By default, when any value in the ‘shape’ input is equal to zero the corresponding dimension value is copied from the input tensor dynamically. allowzero=1 indicates that if any value in the ‘shape’ input is set to zero, the zero value is honored, similar to NumPy. Default value is -name: "allowzero" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
  • shape (heterogeneous)tensor(int64): Specified shape for output.

  • -
-

Outputs

-
    -
  • reshaped (heterogeneous)T: Reshaped data.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxReshape_5#

class skl2onnx.algebra.onnx_ops.OnnxReshape_5(*args, **kwargs)#

Version

Onnx name: Reshape

This version of the operator has been available since version 5.

Summary

Reshape the input tensor similar to numpy.reshape. First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. At most one dimension of the new shape can be -1. In this case, the value is inferred from the size of the tensor and the remaining dimensions. A dimension could also be 0, in which case the actual dimension value is unchanged (i.e. taken from the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. The input tensor’s shape and the output tensor’s shape are required to have the same number of elements.

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
  • shape (heterogeneous)tensor(int64): Specified shape for output.

  • -
-

Outputs

-
    -
  • reshaped (heterogeneous)T: Reshaped data.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxResize#

class skl2onnx.algebra.onnx_ops.OnnxResize(*args, **kwargs)#

Version

Onnx name: Resize

This version of the operator has been available since version 13.

Summary

Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. Each dimension value of the output tensor is:

output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input "sizes" is not specified.


Attributes

-
    -
  • coordinate_transformation_mode:

    This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.

    The coordinate of each dimension is transformed individually. Let’s describe a case using axis x as an example. Denote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, length_original as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in input “roi”, scale = length_resized / length_original.

    if coordinate_transformation_mode is “half_pixel”, x_original = (x_resized + 0.5) / scale - 0.5,

    if coordinate_transformation_mode is “pytorch_half_pixel”, x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0,

    if coordinate_transformation_mode is “align_corners”, x_original = x_resized * (length_original - 1) / (length_resized - 1),

    if coordinate_transformation_mode is “asymmetric”, x_original = x_resized / scale,

    if coordinate_transformation_mode is “tf_crop_and_resize”, x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1).

    Default value is name: "coordinate_transformation_mode" s: "half_pixel" type: STRING

-
-
    -
  • cubic_coeff_a: The coefficient ‘a’ used in cubic interpolation. Two common choice are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if “mode” is “cubic”. Default value is -name: "cubic_coeff_a" f: -0.75 type: FLOAT

  • -
  • exclude_outside: If set to 1, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0. The default value is 0. Default value is -name: "exclude_outside" i: 0 type: INT

  • -
  • extrapolation_value: When coordinate_transformation_mode is “tf_crop_and_resize” and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f. Default value is -name: "extrapolation_value" f: 0.0 type: FLOAT

  • -
  • mode: Three interpolation modes: nearest (default), linear and cubic. The “linear” mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The “cubic” mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor). Default value is -name: "mode" s: "nearest" type: STRING

  • -
  • nearest_mode: Four modes: round_prefer_floor (default, as known as round half down), round_prefer_ceil (as known as round half up), floor, ceil. Only used by nearest interpolation. It indicates how to get “nearest” pixel in input tensor from x_original, so this attribute is valid only if “mode” is “nearest”. Default value is -name: "nearest_mode" s: "round_prefer_floor" type: STRING

  • -
-

Inputs

-

Between 1 and 4 inputs.

-
    -
  • X (heterogeneous)T1: N-D tensor

  • -
  • roi (optional, heterogeneous)T2: 1-D tensor given as [start1, …, startN, end1, …, endN], where N is the rank of X. The RoIs’ coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is “tf_crop_and_resize”

  • -
  • scales (optional, heterogeneous)tensor(float): The scale array along each dimension. It takes value greater than 0. If it’s less than 1, it’s sampling down, otherwise, it’s upsampling. The number of elements of ‘scales’ should be the same as the rank of input ‘X’. One of ‘scales’ and ‘sizes’ MUST be specified and it is an error if both are specified. If ‘sizes’ is needed, the user can use an empty string as the name of ‘scales’ in this operator’s input list.

  • -
  • sizes (optional, heterogeneous)tensor(int64): The size of the output tensor. The number of elements of ‘sizes’ should be the same as the rank of input ‘X’. Only one of ‘scales’ and ‘sizes’ can be specified.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T1: N-D tensor after resizing

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

  • -
  • T2 tensor(float16), tensor(float), tensor(double): Constrain roi type to float or double.

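As an illustration of the coordinate transformation modes listed above, here is a deliberately simplified 1-D linear resize in NumPy; it ignores roi, exclude_outside and the cubic mode and is not the onnxruntime kernel.

    import numpy as np

    def linear_resize_1d(x, scale, mode="half_pixel"):
        """Toy 1-D linear Resize using the coordinate transforms listed above."""
        length_original = x.shape[0]
        length_resized = int(np.floor(length_original * scale))
        x_resized = np.arange(length_resized, dtype=np.float64)
        if mode == "half_pixel":
            x_original = (x_resized + 0.5) / scale - 0.5
        elif mode == "align_corners":
            x_original = x_resized * (length_original - 1) / (length_resized - 1)
        else:  # "asymmetric"
            x_original = x_resized / scale
        # linear interpolation between the two nearest input samples
        lo = np.clip(np.floor(x_original).astype(int), 0, length_original - 1)
        hi = np.clip(lo + 1, 0, length_original - 1)
        w = np.clip(x_original - lo, 0.0, 1.0)
        return (1 - w) * x[lo] + w * x[hi]

    x = np.array([0.0, 10.0, 20.0, 30.0])
    print(linear_resize_1d(x, 2.0))                 # upsample by 2, half_pixel
    print(linear_resize_1d(x, 2.0, "asymmetric"))   # same scale, different mode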
OnnxResize_10#

class skl2onnx.algebra.onnx_ops.OnnxResize_10(*args, **kwargs)#

Version

Onnx name: Resize

This version of the operator has been available since version 10.

Summary

Resize the input tensor. Each dimension value of the output tensor is:

output_dimension = floor(input_dimension * scale).


Attributes

-
    -
  • mode: Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc) Default value is -name: "mode" s: "nearest" type: STRING

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: N-D tensor

  • -
  • scales (heterogeneous)tensor(float): The scale array along each dimension. It takes value greater than 0. If it’s less than 1, it’s sampling down, otherwise, it’s upsampling. The number of elements of ‘scales’ should be the same as the rank of input ‘X’.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: N-D tensor after resizing

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

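Because this opset-10 version has neither a roi nor a sizes input, the output shape follows directly from the formula above; a tiny NumPy check with hypothetical shapes:

    import numpy as np

    # Opset-10 Resize: the output shape is floor(input_dimension * scale)
    # along every axis, with one scale per dimension of the input.
    input_shape = np.array([1, 3, 32, 48])
    scales = np.array([1.0, 1.0, 2.0, 0.5], dtype=np.float32)
    output_shape = np.floor(input_shape * scales).astype(np.int64)
    print(output_shape)  # [ 1  3 64 24]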
OnnxResize_11#

class skl2onnx.algebra.onnx_ops.OnnxResize_11(*args, **kwargs)#

Version

Onnx name: Resize

This version of the operator has been available since version 11.

Summary

Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. Each dimension value of the output tensor is:

output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input "sizes" is not specified.


Attributes

-
    -
  • coordinate_transformation_mode:

    This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.

    The coordinate of each dimension is transformed individually. Let’s describe a case using axis x as an example. Denote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, length_original as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in input “roi”, scale = length_resized / length_original.

    if coordinate_transformation_mode is “half_pixel”, x_original = (x_resized + 0.5) / scale - 0.5,

    if coordinate_transformation_mode is “pytorch_half_pixel”, x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0,

    if coordinate_transformation_mode is “align_corners”, x_original = x_resized * (length_original - 1) / (length_resized - 1),

    if coordinate_transformation_mode is “asymmetric”, x_original = x_resized / scale,

    if coordinate_transformation_mode is “tf_half_pixel_for_nn”, x_original = (x_resized + 0.5) / scale,

    if coordinate_transformation_mode is “tf_crop_and_resize”, x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1).

    Default value is name: "coordinate_transformation_mode" s: "half_pixel" type: STRING

-
-
    -
  • cubic_coeff_a: The coefficient ‘a’ used in cubic interpolation. Two common choice are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if “mode” is “cubic”. Default value is -name: "cubic_coeff_a" f: -0.75 type: FLOAT

  • -
  • exclude_outside: If set to 1, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0. The default value is 0. Default value is -name: "exclude_outside" i: 0 type: INT

  • -
  • extrapolation_value: When coordinate_transformation_mode is “tf_crop_and_resize” and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f. Default value is -name: "extrapolation_value" f: 0.0 type: FLOAT

  • -
  • mode: Three interpolation modes: nearest (default), linear and cubic. The “linear” mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The “cubic” mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor). Default value is -name: "mode" s: "nearest" type: STRING

  • -
  • nearest_mode: Four modes: round_prefer_floor (default, as known as round half down), round_prefer_ceil (as known as round half up), floor, ceil. Only used by nearest interpolation. It indicates how to get “nearest” pixel in input tensor from x_original, so this attribute is valid only if “mode” is “nearest”. Default value is -name: "nearest_mode" s: "round_prefer_floor" type: STRING

  • -
-

Inputs

-

Between 3 and 4 inputs.

-
    -
  • X (heterogeneous)T1: N-D tensor

  • -
  • roi (heterogeneous)T2: 1-D tensor given as [start1, …, startN, end1, …, endN], where N is the rank of X. The RoIs’ coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is “tf_crop_and_resize”

  • -
  • scales (heterogeneous)tensor(float): The scale array along each dimension. It takes value greater than 0. If it’s less than 1, it’s sampling down, otherwise, it’s upsampling. The number of elements of ‘scales’ should be the same as the rank of input ‘X’. If ‘size’ is needed, the user must set ‘scales’ to an empty tensor.

  • -
  • sizes (optional, heterogeneous)tensor(int64): The size of the output tensor. The number of elements of ‘sizes’ should be the same as the rank of input ‘X’. May only be set if ‘scales’ is set to an empty tensor.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T1: N-D tensor after resizing

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

  • -
  • T2 tensor(float16), tensor(float), tensor(double): Constrain roi type to float or double.

OnnxResize_13#

class skl2onnx.algebra.onnx_ops.OnnxResize_13(*args, **kwargs)#

Version

Onnx name: Resize

This version of the operator has been available since version 13.

Summary

Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. Each dimension value of the output tensor is:

output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input "sizes" is not specified.


Attributes

-
    -
  • coordinate_transformation_mode:

    This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.

    The coordinate of each dimension is transformed individually. Let’s describe a case using axis x as an example. Denote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, length_original as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in input “roi”, scale = length_resized / length_original.

    if coordinate_transformation_mode is “half_pixel”, x_original = (x_resized + 0.5) / scale - 0.5,

    if coordinate_transformation_mode is “pytorch_half_pixel”, x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0,

    if coordinate_transformation_mode is “align_corners”, x_original = x_resized * (length_original - 1) / (length_resized - 1),

    if coordinate_transformation_mode is “asymmetric”, x_original = x_resized / scale,

    if coordinate_transformation_mode is “tf_crop_and_resize”, x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1).

    Default value is name: "coordinate_transformation_mode" s: "half_pixel" type: STRING

-
-
    -
  • cubic_coeff_a: The coefficient ‘a’ used in cubic interpolation. Two common choice are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if “mode” is “cubic”. Default value is -name: "cubic_coeff_a" f: -0.75 type: FLOAT

  • -
  • exclude_outside: If set to 1, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0. The default value is 0. Default value is -name: "exclude_outside" i: 0 type: INT

  • -
  • extrapolation_value: When coordinate_transformation_mode is “tf_crop_and_resize” and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f. Default value is -name: "extrapolation_value" f: 0.0 type: FLOAT

  • -
  • mode: Three interpolation modes: nearest (default), linear and cubic. The “linear” mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The “cubic” mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor). Default value is -name: "mode" s: "nearest" type: STRING

  • -
  • nearest_mode: Four modes: round_prefer_floor (default, as known as round half down), round_prefer_ceil (as known as round half up), floor, ceil. Only used by nearest interpolation. It indicates how to get “nearest” pixel in input tensor from x_original, so this attribute is valid only if “mode” is “nearest”. Default value is -name: "nearest_mode" s: "round_prefer_floor" type: STRING

  • -
-

Inputs

-

Between 1 and 4 inputs.

-
    -
  • X (heterogeneous)T1: N-D tensor

  • -
  • roi (optional, heterogeneous)T2: 1-D tensor given as [start1, …, startN, end1, …, endN], where N is the rank of X. The RoIs’ coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is “tf_crop_and_resize”

  • -
  • scales (optional, heterogeneous)tensor(float): The scale array along each dimension. It takes value greater than 0. If it’s less than 1, it’s sampling down, otherwise, it’s upsampling. The number of elements of ‘scales’ should be the same as the rank of input ‘X’. One of ‘scales’ and ‘sizes’ MUST be specified and it is an error if both are specified. If ‘sizes’ is needed, the user can use an empty string as the name of ‘scales’ in this operator’s input list.

  • -
  • sizes (optional, heterogeneous)tensor(int64): The size of the output tensor. The number of elements of ‘sizes’ should be the same as the rank of input ‘X’. Only one of ‘scales’ and ‘sizes’ can be specified.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T1: N-D tensor after resizing

  • -
-

Type Constraints

-
    -
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

  • -
  • T2 tensor(float16), tensor(float), tensor(double): Constrain roi type to float or double.

OnnxReverseSequence#

class skl2onnx.algebra.onnx_ops.OnnxReverseSequence(*args, **kwargs)#

Version

Onnx name: ReverseSequence

This version of the operator has been available since version 10.

Summary

Reverse batch of sequences having different lengths specified by sequence_lens.

For each slice i iterating on the batch axis, the operator reverses the first sequence_lens[i] elements on the time axis, and copies the elements whose index is beyond sequence_lens[i] to the output unchanged. So the output slice i contains the reversed sequence in its first sequence_lens[i] elements, and the original values for the remaining elements.
Example 1:

    input = [[0.0, 4.0, 8.0, 12.0],
             [1.0, 5.0, 9.0, 13.0],
             [2.0, 6.0, 10.0, 14.0],
             [3.0, 7.0, 11.0, 15.0]]
    sequence_lens = [4, 3, 2, 1]
    time_axis = 0
    batch_axis = 1

    output = [[3.0, 6.0, 9.0, 12.0],
              [2.0, 5.0, 8.0, 13.0],
              [1.0, 4.0, 10.0, 14.0],
              [0.0, 7.0, 11.0, 15.0]]

Example 2:

    input = [[0.0, 1.0, 2.0, 3.0],
             [4.0, 5.0, 6.0, 7.0],
             [8.0, 9.0, 10.0, 11.0],
             [12.0, 13.0, 14.0, 15.0]]
    sequence_lens = [1, 2, 3, 4]
    time_axis = 1
    batch_axis = 0

    output = [[0.0, 1.0, 2.0, 3.0],
              [5.0, 4.0, 6.0, 7.0],
              [10.0, 9.0, 8.0, 11.0],
              [15.0, 14.0, 13.0, 12.0]]

Attributes

-
    -
  • batch_axis: (Optional) Specify which axis is batch axis. Must be one of 1 (default), or 0. Default value is -name: "batch_axis" i: 1 type: INT

  • -
  • time_axis: (Optional) Specify which axis is time axis. Must be one of 0 (default), or 1. Default value is -name: "time_axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Tensor of rank r >= 2.

  • -
  • sequence_lens (heterogeneous)tensor(int64): Tensor specifying lengths of the sequences in a batch. It has shape [batch_size].

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Tensor with same shape of input.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

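A plain NumPy re-implementation of this behaviour (illustrative only) reproduces Example 1 above:

    import numpy as np

    def reverse_sequence(x, sequence_lens, time_axis=0, batch_axis=1):
        """Reverse the first sequence_lens[i] steps of every batch slice i."""
        y = x.copy()
        for i, n in enumerate(sequence_lens):
            idx = [slice(None), slice(None)]
            idx[batch_axis] = i
            idx[time_axis] = slice(0, n)
            rev = [slice(None), slice(None)]
            rev[batch_axis] = i
            rev[time_axis] = slice(n - 1, None, -1)
            y[tuple(idx)] = x[tuple(rev)]
        return y

    x = np.array([[0.,  4.,  8., 12.],
                  [1.,  5.,  9., 13.],
                  [2.,  6., 10., 14.],
                  [3.,  7., 11., 15.]], dtype=np.float32)
    print(reverse_sequence(x, [4, 3, 2, 1]))  # matches Example 1 above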
OnnxReverseSequence_10#

class skl2onnx.algebra.onnx_ops.OnnxReverseSequence_10(*args, **kwargs)#

Version

Onnx name: ReverseSequence

This version of the operator has been available since version 10.

Summary

Reverse batch of sequences having different lengths specified by sequence_lens.

For each slice i iterating on the batch axis, the operator reverses the first sequence_lens[i] elements on the time axis, and copies the elements whose index is beyond sequence_lens[i] to the output unchanged. So the output slice i contains the reversed sequence in its first sequence_lens[i] elements, and the original values for the remaining elements.
Example 1:

    input = [[0.0, 4.0, 8.0, 12.0],
             [1.0, 5.0, 9.0, 13.0],
             [2.0, 6.0, 10.0, 14.0],
             [3.0, 7.0, 11.0, 15.0]]
    sequence_lens = [4, 3, 2, 1]
    time_axis = 0
    batch_axis = 1

    output = [[3.0, 6.0, 9.0, 12.0],
              [2.0, 5.0, 8.0, 13.0],
              [1.0, 4.0, 10.0, 14.0],
              [0.0, 7.0, 11.0, 15.0]]

Example 2:

    input = [[0.0, 1.0, 2.0, 3.0],
             [4.0, 5.0, 6.0, 7.0],
             [8.0, 9.0, 10.0, 11.0],
             [12.0, 13.0, 14.0, 15.0]]
    sequence_lens = [1, 2, 3, 4]
    time_axis = 1
    batch_axis = 0

    output = [[0.0, 1.0, 2.0, 3.0],
              [5.0, 4.0, 6.0, 7.0],
              [10.0, 9.0, 8.0, 11.0],
              [15.0, 14.0, 13.0, 12.0]]

Attributes

-
    -
  • batch_axis: (Optional) Specify which axis is batch axis. Must be one of 1 (default), or 0. Default value is -name: "batch_axis" i: 1 type: INT

  • -
  • time_axis: (Optional) Specify which axis is time axis. Must be one of 0 (default), or 1. Default value is -name: "time_axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Tensor of rank r >= 2.

  • -
  • sequence_lens (heterogeneous)tensor(int64): Tensor specifying lengths of the sequences in a batch. It has shape [batch_size].

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Tensor with same shape of input.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

OnnxRoiAlign#

class skl2onnx.algebra.onnx_ops.OnnxRoiAlign(*args, **kwargs)#

Version

Onnx name: RoiAlign

This version of the operator has been available since version 16.

Summary

Region of Interest (RoI) align operation described in the [Mask R-CNN paper](https://arxiv.org/abs/1703.06870). RoiAlign consumes an input tensor X and region of interests (rois) to apply pooling across each RoI; it produces a 4-D tensor of shape (num_rois, C, output_height, output_width).

RoiAlign is proposed to avoid the misalignment by removing quantizations while converting from original image into feature map and from feature map into RoI feature; in each RoI bin, the value of the sampled locations are computed directly through bilinear interpolation.

Attributes

-
    -
  • coordinate_transformation_mode: Allowed values are ‘half_pixel’ and ‘output_half_pixel’. Use the value ‘half_pixel’ to pixel shift the input coordinates by -0.5 (the recommended behavior). Use the value ‘output_half_pixel’ to omit the pixel shift for the input (use this for a backward-compatible behavior). Default value is -name: "coordinate_transformation_mode" s: "half_pixel" type: STRING

  • -
  • mode: The pooling method. Two modes are supported: ‘avg’ and ‘max’. Default is ‘avg’. Default value is -name: "mode" s: "avg" type: STRING

  • -
  • output_height: default 1; Pooled output Y’s height. Default value is -name: "output_height" i: 1 type: INT

  • -
  • output_width: default 1; Pooled output Y’s width. Default value is -name: "output_width" i: 1 type: INT

  • -
  • sampling_ratio: Number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If == 0, then an adaptive number of grid points are used (computed as ceil(roi_width / output_width), and likewise for height). Default is 0. Default value is -name: "sampling_ratio" i: 0 type: INT

  • -
  • spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling, i.e., spatial scale of the input feature map X relative to the input image. E.g.; default is 1.0f. Default value is -name: "spatial_scale" f: 1.0 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: Input data tensor from the previous operator; 4-D feature map of shape (N, C, H, W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.

  • -
  • rois (heterogeneous)T1: RoIs (Regions of Interest) to pool over; rois is 2-D input of shape (num_rois, 4) given as [[x1, y1, x2, y2], …]. The RoIs’ coordinates are in the coordinate system of the input image. Each coordinate set has a 1:1 correspondence with the ‘batch_indices’ input.

  • -
  • batch_indices (heterogeneous)T2: 1-D tensor of shape (num_rois,) with each element denoting the index of the corresponding image in the batch.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T1: RoI pooled output, 4-D tensor of shape (num_rois, C, output_height, output_width). The r-th batch element Y[r-1] is a pooled feature map corresponding to the r-th RoI X[r-1].

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double): Constrain types to float tensors.

  • -
  • T2 tensor(int64): Constrain types to int tensors.

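The bilinear sampling mentioned above can be sketched for a single sampling point; this fragment is a simplification (one channel, one point, no sampling_ratio averaging) meant only to show the role of spatial_scale and the half_pixel shift.

    import numpy as np

    def bilinear_sample(fm, y, x):
        """Bilinearly interpolate a feature map fm of shape (H, W) at (y, x)."""
        h, w = fm.shape
        y0, x0 = int(np.floor(y)), int(np.floor(x))
        dy, dx = y - y0, x - x0
        y0c, y1c = np.clip([y0, y0 + 1], 0, h - 1)
        x0c, x1c = np.clip([x0, x0 + 1], 0, w - 1)
        top = (1 - dx) * fm[y0c, x0c] + dx * fm[y0c, x1c]
        bot = (1 - dx) * fm[y1c, x0c] + dx * fm[y1c, x1c]
        return (1 - dy) * top + dy * bot

    # One sampling point of one RoI bin: scale the RoI coordinate into
    # feature-map space and, with coordinate_transformation_mode="half_pixel",
    # shift it by -0.5 before sampling.
    fm = np.arange(16, dtype=np.float32).reshape(4, 4)
    spatial_scale = 0.5
    roi_x, roi_y = 3.0, 2.0          # a point of an RoI in image coordinates
    x = roi_x * spatial_scale - 0.5
    y = roi_y * spatial_scale - 0.5
    print(bilinear_sample(fm, y, x))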
OnnxRoiAlign_10#

class skl2onnx.algebra.onnx_ops.OnnxRoiAlign_10(*args, **kwargs)#

Version

Onnx name: RoiAlign

This version of the operator has been available since version 10.

Summary

Region of Interest (RoI) align operation described in the [Mask R-CNN paper](https://arxiv.org/abs/1703.06870). RoiAlign consumes an input tensor X and region of interests (rois) to apply pooling across each RoI; it produces a 4-D tensor of shape (num_rois, C, output_height, output_width).

RoiAlign is proposed to avoid the misalignment by removing quantizations while converting from original image into feature map and from feature map into RoI feature; in each RoI bin, the value of the sampled locations are computed directly through bilinear interpolation.

Attributes

-
    -
  • mode: The pooling method. Two modes are supported: ‘avg’ and ‘max’. Default is ‘avg’. Default value is -name: "mode" s: "avg" type: STRING

  • -
  • output_height: default 1; Pooled output Y’s height. Default value is -name: "output_height" i: 1 type: INT

  • -
  • output_width: default 1; Pooled output Y’s width. Default value is -name: "output_width" i: 1 type: INT

  • -
  • sampling_ratio: Number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If == 0, then an adaptive number of grid points are used (computed as ceil(roi_width / output_width), and likewise for height). Default is 0. Default value is -name: "sampling_ratio" i: 0 type: INT

  • -
  • spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling, i.e., spatial scale of the input feature map X relative to the input image. E.g.; default is 1.0f. Default value is -name: "spatial_scale" f: 1.0 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: Input data tensor from the previous operator; 4-D feature map of shape (N, C, H, W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.

  • -
  • rois (heterogeneous)T1: RoIs (Regions of Interest) to pool over; rois is 2-D input of shape (num_rois, 4) given as [[x1, y1, x2, y2], …]. The RoIs’ coordinates are in the coordinate system of the input image. Each coordinate set has a 1:1 correspondence with the ‘batch_indices’ input.

  • -
  • batch_indices (heterogeneous)T2: 1-D tensor of shape (num_rois,) with each element denoting the index of the corresponding image in the batch.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T1: RoI pooled output, 4-D tensor of shape (num_rois, C, output_height, output_width). The r-th batch element Y[r-1] is a pooled feature map corresponding to the r-th RoI X[r-1].

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double): Constrain types to float tensors.

  • -
  • T2 tensor(int64): Constrain types to int tensors.

OnnxRoiAlign_16#

class skl2onnx.algebra.onnx_ops.OnnxRoiAlign_16(*args, **kwargs)#

Version

Onnx name: RoiAlign

This version of the operator has been available since version 16.

Summary

Region of Interest (RoI) align operation described in the [Mask R-CNN paper](https://arxiv.org/abs/1703.06870). RoiAlign consumes an input tensor X and region of interests (rois) to apply pooling across each RoI; it produces a 4-D tensor of shape (num_rois, C, output_height, output_width).

RoiAlign is proposed to avoid the misalignment by removing quantizations while converting from original image into feature map and from feature map into RoI feature; in each RoI bin, the value of the sampled locations are computed directly through bilinear interpolation.

Attributes

-
    -
  • coordinate_transformation_mode: Allowed values are ‘half_pixel’ and ‘output_half_pixel’. Use the value ‘half_pixel’ to pixel shift the input coordinates by -0.5 (the recommended behavior). Use the value ‘output_half_pixel’ to omit the pixel shift for the input (use this for a backward-compatible behavior). Default value is -name: "coordinate_transformation_mode" s: "half_pixel" type: STRING

  • -
  • mode: The pooling method. Two modes are supported: ‘avg’ and ‘max’. Default is ‘avg’. Default value is -name: "mode" s: "avg" type: STRING

  • -
  • output_height: default 1; Pooled output Y’s height. Default value is -name: "output_height" i: 1 type: INT

  • -
  • output_width: default 1; Pooled output Y’s width. Default value is -name: "output_width" i: 1 type: INT

  • -
  • sampling_ratio: Number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If == 0, then an adaptive number of grid points are used (computed as ceil(roi_width / output_width), and likewise for height). Default is 0. Default value is -name: "sampling_ratio" i: 0 type: INT

  • -
  • spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling, i.e., spatial scale of the input feature map X relative to the input image. E.g.; default is 1.0f. Default value is -name: "spatial_scale" f: 1.0 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: Input data tensor from the previous operator; 4-D feature map of shape (N, C, H, W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.

  • -
  • rois (heterogeneous)T1: RoIs (Regions of Interest) to pool over; rois is 2-D input of shape (num_rois, 4) given as [[x1, y1, x2, y2], …]. The RoIs’ coordinates are in the coordinate system of the input image. Each coordinate set has a 1:1 correspondence with the ‘batch_indices’ input.

  • -
  • batch_indices (heterogeneous)T2: 1-D tensor of shape (num_rois,) with each element denoting the index of the corresponding image in the batch.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T1: RoI pooled output, 4-D tensor of shape (num_rois, C, output_height, output_width). The r-th batch element Y[r-1] is a pooled feature map corresponding to the r-th RoI X[r-1].

  • -
-

Type Constraints

-
    -
  • T1 tensor(float16), tensor(float), tensor(double): Constrain types to float tensors.

  • -
  • T2 tensor(int64): Constrain types to int tensors.

OnnxRound#

class skl2onnx.algebra.onnx_ops.OnnxRound(*args, **kwargs)#

Version

Onnx name: Round

This version of the operator has been available since version 11.

Summary

Round takes one input Tensor and rounds the values, element-wise, meaning it finds the nearest integer for each value. In case of halves, the rule is to round them to the nearest even integer. The output tensor has the same shape and type as the input.

Examples:

    round([0.9]) = [1.0]
    round([2.5]) = [2.0]
    round([2.3]) = [2.0]
    round([1.5]) = [2.0]
    round([-4.5]) = [-4.0]

Inputs

  • X (heterogeneous) T: Input tensor

Outputs

  • Y (heterogeneous) T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

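numpy.round follows the same round-half-to-even rule, so the examples above can be checked directly:

    import numpy as np

    # numpy.round also rounds halves to the nearest even integer, matching
    # the Round examples listed above.
    x = np.array([0.9, 2.5, 2.3, 1.5, -4.5])
    print(np.round(x))  # [ 1.  2.  2.  2. -4.]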
OnnxRound_11#

class skl2onnx.algebra.onnx_ops.OnnxRound_11(*args, **kwargs)#

Version

Onnx name: Round

This version of the operator has been available since version 11.

Summary

Round takes one input Tensor and rounds the values, element-wise, meaning it finds the nearest integer for each value. In case of halves, the rule is to round them to the nearest even integer. The output tensor has the same shape and type as the input.

Examples:

    round([0.9]) = [1.0]
    round([2.5]) = [2.0]
    round([2.3]) = [2.0]
    round([1.5]) = [2.0]
    round([-4.5]) = [-4.0]

Inputs

  • X (heterogeneous) T: Input tensor

Outputs

  • Y (heterogeneous) T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxSVMClassifier#

class skl2onnx.algebra.onnx_ops.OnnxSVMClassifier(*args, **kwargs)#

Version

Onnx name: SVMClassifier

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Support Vector Machine classifier

Attributes

-
    -
  • -
  • -
  • -
  • -
  • kernel_type: The kernel type, one of ‘LINEAR,’ ‘POLY,’ ‘RBF,’ ‘SIGMOID’. Default value is -name: "kernel_type" s: "LINEAR" type: STRING

  • -
  • post_transform: Indicates the transform to apply to the score. <br>One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’ Default value is -name: "post_transform" s: "NONE" type: STRING

  • -
  • -
  • -
  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: Data to be classified.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: Classification outputs (one class per example).

  • -
  • Z (heterogeneous)tensor(float): Class scores (one per class per example), if prob_a and prob_b are provided they are probabilities for each class, otherwise they are raw scores.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type, either [C] or [N,C].

  • -
  • T2 tensor(string), tensor(int64): The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used. Its size will match the batch size of the input.

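This node is normally not built by hand: converting a fitted scikit-learn SVC with skl2onnx produces it. A hedged end-to-end sketch (the exact auxiliary nodes such as Cast or ZipMap depend on converter options and versions):

    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    from skl2onnx import convert_sklearn
    from skl2onnx.common.data_types import FloatTensorType

    X, y = load_iris(return_X_y=True)
    model = SVC(kernel="rbf", probability=True).fit(X.astype(np.float32), y)

    # Converting the fitted estimator yields a graph whose core node is the
    # ai.onnx.ml SVMClassifier documented above.
    onx = convert_sklearn(model, initial_types=[("X", FloatTensorType([None, 4]))])
    print({n.op_type for n in onx.graph.node})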
OnnxSVMClassifier_1#

class skl2onnx.algebra.onnx_ops.OnnxSVMClassifier_1(*args, **kwargs)#

Version

Onnx name: SVMClassifier

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Support Vector Machine classifier

Attributes

-
    -
  • -
  • -
  • -
  • -
  • kernel_type: The kernel type, one of ‘LINEAR,’ ‘POLY,’ ‘RBF,’ ‘SIGMOID’. Default value is -name: "kernel_type" s: "LINEAR" type: STRING

  • -
  • post_transform: Indicates the transform to apply to the score. <br>One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’ Default value is -name: "post_transform" s: "NONE" type: STRING

  • -
  • -
  • -
  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T1: Data to be classified.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T2: Classification outputs (one class per example).

  • -
  • Z (heterogeneous)tensor(float): Class scores (one per class per example), if prob_a and prob_b are provided they are probabilities for each class, otherwise they are raw scores.

  • -
-

Type Constraints

-
    -
  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type, either [C] or [N,C].

  • -
  • T2 tensor(string), tensor(int64): The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used. Its size will match the batch size of the input.

OnnxSVMRegressor#

class skl2onnx.algebra.onnx_ops.OnnxSVMRegressor(*args, **kwargs)#

Version

Onnx name: SVMRegressor

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Support Vector Machine regression prediction and one-class SVM anomaly detection.

Attributes

-
    -
  • -
  • -
  • kernel_type: The kernel type, one of ‘LINEAR,’ ‘POLY,’ ‘RBF,’ ‘SIGMOID’. Default value is -name: "kernel_type" s: "LINEAR" type: STRING

  • -
  • n_supports: The number of support vectors. Default value is -name: "n_supports" i: 0 type: INT

  • -
  • one_class: Flag indicating whether the regression is a one-class SVM or not. Default value is -name: "one_class" i: 0 type: INT

  • -
  • post_transform: Indicates the transform to apply to the score. <br>One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT.’ Default value is -name: "post_transform" s: "NONE" type: STRING

  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Data to be regressed.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(float): Regression outputs (one score per target per example).

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type, either [C] or [N,C].

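The regression counterpart is reached the same way by converting a fitted SVR; a OneClassSVM would also rely on this node through the one_class attribute. A short sketch with synthetic data:

    import numpy as np
    from sklearn.svm import SVR
    from skl2onnx import convert_sklearn
    from skl2onnx.common.data_types import FloatTensorType

    X = np.random.rand(50, 3).astype(np.float32)
    y = X.sum(axis=1)
    model = SVR(kernel="rbf").fit(X, y)

    # The converted graph relies on the ai.onnx.ml SVMRegressor node above.
    onx = convert_sklearn(model, initial_types=[("X", FloatTensorType([None, 3]))])
    print([n.op_type for n in onx.graph.node])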
OnnxSVMRegressor_1#

class skl2onnx.algebra.onnx_ops.OnnxSVMRegressor_1(*args, **kwargs)#

Version

Onnx name: SVMRegressor

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Support Vector Machine regression prediction and one-class SVM anomaly detection.

Attributes

-
    -
  • -
  • -
  • kernel_type: The kernel type, one of ‘LINEAR,’ ‘POLY,’ ‘RBF,’ ‘SIGMOID’. Default value is -name: "kernel_type" s: "LINEAR" type: STRING

  • -
  • n_supports: The number of support vectors. Default value is -name: "n_supports" i: 0 type: INT

  • -
  • one_class: Flag indicating whether the regression is a one-class SVM or not. Default value is -name: "one_class" i: 0 type: INT

  • -
  • post_transform: Indicates the transform to apply to the score. <br>One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT.’ Default value is -name: "post_transform" s: "NONE" type: STRING

  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Data to be regressed.

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(float): Regression outputs (one score per target per example).

  • -
-

Type Constraints

-
    -
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type, either [C] or [N,C].

OnnxScaler#

class skl2onnx.algebra.onnx_ops.OnnxScaler(*args, **kwargs)#

Version

Onnx name: Scaler

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Rescale input data, for example to standardize features by removing the mean and scaling to unit variance.

Attributes

-
    -
  • -
  • -
-

Inputs

  • X (heterogeneous) T: Data to be scaled.

Outputs

  • Y (heterogeneous) tensor(float): Scaled output data.

Type Constraints

  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type.

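The node computes Y = (X - offset) * scale feature-wise, which is what a fitted StandardScaler applies at transform time; a small NumPy illustration (the attribute names offset and scale come from the ONNX-ML specification):

    import numpy as np

    # Scaler semantics: Y = (X - offset) * scale, applied per feature column.
    X = np.array([[1.0, 10.0], [3.0, 30.0]], dtype=np.float32)
    offset = X.mean(axis=0)          # corresponds to the "offset" attribute
    scale = 1.0 / X.std(axis=0)      # corresponds to the "scale" attribute
    print((X - offset) * scale)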
OnnxScaler_1#

class skl2onnx.algebra.onnx_ops.OnnxScaler_1(*args, **kwargs)#

Version

Onnx name: Scaler

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Rescale input data, for example to standardize features by removing the mean and scaling to unit variance.

Attributes

-
    -
  • -
  • -
-

Inputs

  • X (heterogeneous) T: Data to be scaled.

Outputs

  • Y (heterogeneous) tensor(float): Scaled output data.

Type Constraints

  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type.

OnnxScan#

class skl2onnx.algebra.onnx_ops.OnnxScan(*args, **kwargs)#

Version

Onnx name: Scan

This version of the operator has been available since version 16.

Summary

-

Scan can be used to iterate over one or more scan_input tensors, -constructing zero or more scan_output tensors. It combines ideas from general recurrences, -functional programming constructs such as scan, fold, map, and zip and is intended to enable -generalizations of RNN-like constructs for sequence-to-sequence processing. -Other tensors (referred to as state_variables here) can be used to carry a state -when iterating from one element to another (similar to hidden-state in RNNs, also referred -to as loop-carried dependences in the context of loops). -Many common usages involve a single scan_input tensor (where functionality -similar to scan, fold and map can be obtained). When more than one scan_input is used, -a behavior similar to zip is obtained.

-

The attribute body must be a graph, specifying the computation to be performed in -every iteration. It takes as input the current values of the state_variables and -the current iterated element of the scan_inputs. It must return the (updated) values -of the state_variables and zero or more scan_output_element tensors. The values of the -scan_output_element tensors are concatenated over all the iterations to produce the -scan_output values of the scan construct (similar to the concatenated intermediate -hidden-state values of RNN-like constructs). All the output tensors (state_variables as -well as scan_output_element tensors) are required to have the same shape in each iteration -of the loop (a restriction imposed to enable efficient memory allocation).

-

Note that the iterated element passed to the body subgraph does not have a sequence -axis. It will have a rank one less than the rank of the corresponding scan_input.

-

The scan operation returns the final values of the state_variables as well as the -scan_outputs.

-

The optional attribute scan_input_directions specifies the direction (forward or backward) -for each scan input. If this attribute is omitted, all sequences are scanned in the forward -direction. A bidirectional scan may be performed by specifying the same tensor input twice -in the scan_inputs, once with a forward direction, and once with a backward direction.

-

The scan_output of the operation is produced by concatenating the scan_output_element -values produced by the body in each iteration. The optional attribute scan_output_directions -specifies the direction in which scan_output is constructed (by appending or prepending the -scan_output_element to scan_output in each iteration) for each scan_output. If this attribute -is omitted, the scan_output_element is appended to the scan_output in each iteration.

-

The optional attribute scan_input_axes specifies the axis to be scanned for each scan_input. -If omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the -batch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1. -Note that scanning a non-zero axis may be less efficient than scanning axis zero.

-

The optional attribute scan_output_axes specifies the axis along which the scan_outputs -are accumulated for each scan_output. For example, if axis 1 is the time axis (to be -scanned) for both inputs and outputs, specify a scan_input axis and scan_output axis -value of 1.

-

Note that because of the ONNX restriction that only the last parameter of an operator can -be variadic, the initial-states and scan-inputs are listed together as one input parameter. -Similarly, the final-states and scan-outputs are listed together as one output parameter. -The attribute num_scan_inputs indicates the number M of scan-inputs.

-

The behavior of

    Scan <
        num_scan_inputs = m,
        body = loop-body,
        scan_input_axes = [axis_1, …, axis_m]
    > (init_1, …, init_n, scan_1, …, scan_m)

is equivalent to the following pseudo-code:

    // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i
    // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.
    sequence_length = scan_1.shape[axis_1];

    // initialize state-variables
    st_1 = init_1; … st_n = init_n;
    // initialize scan-output variables: [] denotes an empty tensor
    scan_out_1 = []; …; scan_out_k = [];
    // identify number of iterations:

    // execute loop
    for (int t = 0; t < sequence_length; ++t) {
        // generate the scan-input elements: the notation T<axis=k>[t] indicates the sub-tensor
        // of rank one less than T obtained by indexing T at position t along axis k.
        si_1 = scan_1<axis=axis_1>[t];
        … ;
        si_m = scan_m<axis=axis_m>[t];
        // execute loop-body
        st_1, …, st_n, so_1, …, so_k = loop-body(st_1, …, st_n, si_1, …, si_m)
        // accumulate the scan-output elements
        scan_out_1 = Concat<axis=0>(scan_out_1, so_1); … ; scan_out_k = Concat<axis=0>(scan_out_k, so_k);
    }

    return st_1, …, st_n, scan_out_1, …, scan_out_k;

-
-

Sample usage: Encoding RNN using a Scan

-

The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, -recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can -be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes -%Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these -values are computed in the outer graph, they need to be passed in as extra state_variables.

    graph rnn-encoding {
        %H_0 = …
        %X = …
        %Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1](%H_0, %X)
        return %Y, %Y_h
    }

    graph rnn-cell-1 (
        %H_tminus1[FLOAT, tensor]
        %X_t[FLOAT, tensor]
    ) {
        %Wi = …
        %Ri = …
        %Wbi = …
        %Rbi = …
        %t1 = X_t * (Wi^T)
        %t2 = H_tminus1*(Ri^T)
        %t3 = Add(%t1, %t2)
        %t4 = Add(%t3, %Wbi)
        %t5 = Add(%t4, %Rbi)
        %Ht = Tanh(%t5)
        %Accumulate = Identity(%Ht)
        return %Ht, %Accumulate
    }

Attributes

-
    -
  • -
  • -
  • -
  • -
  • -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • initial_state_and_scan_inputs (variadic)V: Initial values of the loop’s N state variables followed by M scan_inputs

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • final_state_and_scan_outputs (variadic)V: Final values of the loop’s N state variables followed by K scan_outputs

  • -
-

Type Constraints

-
    -
  • I tensor(int64): Int64 tensor

  • -
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

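The pseudo-code above can be mirrored in plain Python; this illustrative loop computes a running sum, i.e. the kind of recurrence Scan expresses in a single node (the real operator receives the body as a nested graph):

    import numpy as np

    def scan_cumsum(init, xs):
        """Python analogue of the pseudo-code above with one state variable and
        one scan input: the body returns the new state and one scan-output element."""
        st = init
        scan_out = []
        for t in range(xs.shape[0]):          # iterate along axis 0 of the scan input
            si = xs[t]                        # scan-input element (rank reduced by one)
            st = st + si                      # loop body: update the state
            scan_out.append(st)               # accumulate the scan-output element
        return st, np.stack(scan_out)         # final state + concatenated scan output

    final_state, cumulative = scan_cumsum(np.zeros(2), np.arange(8.0).reshape(4, 2))
    print(final_state)   # [12. 16.]
    print(cumulative)    # running sums, one row per iteration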
OnnxScan_11#

class skl2onnx.algebra.onnx_ops.OnnxScan_11(*args, **kwargs)#

Version

Onnx name: Scan

This version of the operator has been available since version 11.

Summary

-

Scan can be used to iterate over one or more scan_input tensors, constructing zero or more scan_output tensors. It combines ideas from general recurrences, functional programming constructs such as scan, fold, map, and zip, and is intended to enable generalizations of RNN-like constructs for sequence-to-sequence processing. Other tensors (referred to as state_variables here) can be used to carry a state when iterating from one element to another (similar to hidden-state in RNNs, also referred to as loop-carried dependences in the context of loops). Many common usages involve a single scan_input tensor (where functionality similar to scan, fold and map can be obtained). When more than one scan_input is used, a behavior similar to zip is obtained.

The attribute body must be a graph, specifying the computation to be performed in every iteration. It takes as input the current values of the state_variables and the current iterated element of the scan_inputs. It must return the (updated) values of the state_variables and zero or more scan_output_element tensors. The values of the scan_output_element tensors are concatenated over all the iterations to produce the scan_output values of the scan construct (similar to the concatenated intermediate hidden-state values of RNN-like constructs). All the output tensors (state_variables as well as scan_output_element tensors) are required to have the same shape in each iteration of the loop (a restriction imposed to enable efficient memory allocation).

Note that the iterated element passed to the body subgraph does not have a sequence axis. It will have a rank one less than the rank of the corresponding scan_input.

The scan operation returns the final values of the state_variables as well as the scan_outputs.

The optional attribute scan_input_directions specifies the direction (forward or backward) for each scan input. If this attribute is omitted, all sequences are scanned in the forward direction. A bidirectional scan may be performed by specifying the same tensor input twice in the scan_inputs, once with a forward direction, and once with a backward direction.

The scan_output of the operation is produced by concatenating the scan_output_element values produced by the body in each iteration. The optional attribute scan_output_directions specifies the direction in which scan_output is constructed (by appending or prepending the scan_output_element to scan_output in each iteration) for each scan_output. If this attribute is omitted, the scan_output_element is appended to the scan_output in each iteration.

The optional attribute scan_input_axes specifies the axis to be scanned for each scan_input. If omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the batch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1. Note that scanning a non-zero axis may be less efficient than scanning axis zero.

The optional attribute scan_output_axes specifies the axis along which the scan_outputs are accumulated for each scan_output. For example, if axis 1 is the time axis (to be scanned) for both inputs and outputs, specify a scan_input axis and scan_output axis value of 1.

Note that because of the ONNX restriction that only the last parameter of an operator can be variadic, the initial-states and scan-inputs are listed together as one input parameter. Similarly, the final-states and scan-outputs are listed together as one output parameter. The attribute num_scan_inputs indicates the number M of scan-inputs.

The behavior of

Scan <
    num_scan_inputs = m,
    body = loop-body,
    scan_input_axes = [axis_1, …, axis_m]
> (init_1, …, init_n, scan_1, …, scan_m)

is equivalent to the following pseudo-code:

// scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i
// scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.
sequence_length = scan_1.shape[axis_1];

// initialize state-variables
st_1 = init_1; … st_n = init_n;
// initialize scan-output variables: [] denotes an empty tensor
scan_out_1 = []; …; scan_out_k = [];
// identify number of iterations:

// execute loop
for (int t = 0; t < sequence_length; ++t) {
    // generate the scan-input elements: the notation T<axis=k>[t] indicates the sub-tensor
    // of rank one less than T obtained by indexing T at position t along axis k.
    si_1 = scan_1<axis=axis_1>[t];
    … ;
    si_m = scan_m<axis=axis_m>[t];
    // execute loop-body
    st_1, …, st_n, so_1, …, so_k = loop-body(st_1, …, st_n, si_1, …, si_m)
    // accumulate the scan-output elements
    scan_out_1 = Concat<axis=0>(scan_out_1, so_1); … ; scan_out_k = Concat<axis=0>(scan_out_k, so_k);
}

return st_1, …, st_n, scan_out_1, …, scan_out_k;
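
To make the pseudo-code concrete, here is a minimal NumPy sketch (not part of the ONNX specification) of the same recurrence for a single state variable and a single scan_input scanned along axis 0, with a running-sum loop body; all names are illustrative.

import numpy as np

def scan_running_sum(init, scan_in):
    # init: initial state variable; scan_in: scan_input scanned along axis 0
    st = np.asarray(init, dtype=float)
    scan_out = []
    for t in range(scan_in.shape[0]):      # sequence_length = scan_in.shape[0]
        si = scan_in[t]                    # iterated element: rank one less than scan_in
        st = st + si                       # loop body: updated state variable
        scan_out.append(st)                # loop body: scan_output element for this iteration
    # final state and the concatenation of all scan_output elements
    return st, np.stack(scan_out, axis=0)

final_state, outputs = scan_running_sum(np.zeros(2), np.ones((4, 2)))
# final_state -> [4., 4.]; outputs[t] is the running sum after t + 1 steps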

Sample usage: Encoding RNN using a Scan

The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes %Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these values are computed in the outer graph, they need to be passed in as extra state_variables.

graph rnn-encoding {
    %H_0 = …
    %X = …
    %Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1](%H_0, %X)
    return %Y, %Y_h
}

graph rnn-cell-1 (
    %H_tminus1[FLOAT, tensor]
    %X_t[FLOAT, tensor]
) {
    %Wi = …
    %Ri = …
    %Wbi = …
    %Rbi = …
    %t1 = X_t * (Wi^T)
    %t2 = H_tminus1*(Ri^T)
    %t3 = Add(%t1, %t2)
    %t4 = Add(%t3, %Wbi)
    %t5 = Add(%t4, %Rbi)
    %Ht = Tanh(%t5)
    %Accumulate = Identity(%Ht)
    return %Ht, %Accumulate
}

Attributes

  • body: The graph run in each iteration. It takes the current values of the state_variables and the current scan_input elements, and returns the updated state_variables followed by the scan_output elements.

  • num_scan_inputs: The number M of scan_inputs.

  • scan_input_axes: Optional list giving the axis to be scanned for each scan_input; if omitted, every scan_input is scanned along axis 0.

  • scan_input_directions: Optional list giving the direction (forward or backward) in which each scan_input is scanned; if omitted, all inputs are scanned in the forward direction.

  • scan_output_axes: Optional list giving the axis along which each scan_output is accumulated; if omitted, axis 0 is used.

  • scan_output_directions: Optional list specifying, for each scan_output, whether the scan_output_element is appended or prepended in each iteration; if omitted, elements are appended.

Inputs

Between 1 and 2147483647 inputs.

  • initial_state_and_scan_inputs (variadic) V: Initial values of the loop’s N state variables followed by M scan_inputs

Outputs

Between 1 and 2147483647 outputs.

  • final_state_and_scan_outputs (variadic) V: Final values of the loop’s N state variables followed by K scan_outputs

Type Constraints

  • I tensor(int64): Int64 tensor

  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

OnnxScan_16#

class skl2onnx.algebra.onnx_ops.OnnxScan_16(*args, **kwargs)#

Version

Onnx name: Scan

This version of the operator has been available since version 16.

Summary

Scan can be used to iterate over one or more scan_input tensors, constructing zero or more scan_output tensors. It combines ideas from general recurrences, functional programming constructs such as scan, fold, map, and zip, and is intended to enable generalizations of RNN-like constructs for sequence-to-sequence processing. Other tensors (referred to as state_variables here) can be used to carry a state when iterating from one element to another (similar to hidden-state in RNNs, also referred to as loop-carried dependences in the context of loops). Many common usages involve a single scan_input tensor (where functionality similar to scan, fold and map can be obtained). When more than one scan_input is used, a behavior similar to zip is obtained.

The attribute body must be a graph, specifying the computation to be performed in every iteration. It takes as input the current values of the state_variables and the current iterated element of the scan_inputs. It must return the (updated) values of the state_variables and zero or more scan_output_element tensors. The values of the scan_output_element tensors are concatenated over all the iterations to produce the scan_output values of the scan construct (similar to the concatenated intermediate hidden-state values of RNN-like constructs). All the output tensors (state_variables as well as scan_output_element tensors) are required to have the same shape in each iteration of the loop (a restriction imposed to enable efficient memory allocation).

Note that the iterated element passed to the body subgraph does not have a sequence axis. It will have a rank one less than the rank of the corresponding scan_input.

The scan operation returns the final values of the state_variables as well as the scan_outputs.

The optional attribute scan_input_directions specifies the direction (forward or backward) for each scan input. If this attribute is omitted, all sequences are scanned in the forward direction. A bidirectional scan may be performed by specifying the same tensor input twice in the scan_inputs, once with a forward direction, and once with a backward direction.

The scan_output of the operation is produced by concatenating the scan_output_element values produced by the body in each iteration. The optional attribute scan_output_directions specifies the direction in which scan_output is constructed (by appending or prepending the scan_output_element to scan_output in each iteration) for each scan_output. If this attribute is omitted, the scan_output_element is appended to the scan_output in each iteration.

The optional attribute scan_input_axes specifies the axis to be scanned for each scan_input. If omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the batch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1. Note that scanning a non-zero axis may be less efficient than scanning axis zero.

The optional attribute scan_output_axes specifies the axis along which the scan_outputs are accumulated for each scan_output. For example, if axis 1 is the time axis (to be scanned) for both inputs and outputs, specify a scan_input axis and scan_output axis value of 1.

Note that because of the ONNX restriction that only the last parameter of an operator can be variadic, the initial-states and scan-inputs are listed together as one input parameter. Similarly, the final-states and scan-outputs are listed together as one output parameter. The attribute num_scan_inputs indicates the number M of scan-inputs.

The behavior of

Scan <
    num_scan_inputs = m,
    body = loop-body,
    scan_input_axes = [axis_1, …, axis_m]
> (init_1, …, init_n, scan_1, …, scan_m)

is equivalent to the following pseudo-code:

// scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i
// scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.
sequence_length = scan_1.shape[axis_1];

// initialize state-variables
st_1 = init_1; … st_n = init_n;
// initialize scan-output variables: [] denotes an empty tensor
scan_out_1 = []; …; scan_out_k = [];
// identify number of iterations:

// execute loop
for (int t = 0; t < sequence_length; ++t) {
    // generate the scan-input elements: the notation T<axis=k>[t] indicates the sub-tensor
    // of rank one less than T obtained by indexing T at position t along axis k.
    si_1 = scan_1<axis=axis_1>[t];
    … ;
    si_m = scan_m<axis=axis_m>[t];
    // execute loop-body
    st_1, …, st_n, so_1, …, so_k = loop-body(st_1, …, st_n, si_1, …, si_m)
    // accumulate the scan-output elements
    scan_out_1 = Concat<axis=0>(scan_out_1, so_1); … ; scan_out_k = Concat<axis=0>(scan_out_k, so_k);
}

return st_1, …, st_n, scan_out_1, …, scan_out_k;

Sample usage: Encoding RNN using a Scan

The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes %Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these values are computed in the outer graph, they need to be passed in as extra state_variables.

graph rnn-encoding {
    %H_0 = …
    %X = …
    %Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1](%H_0, %X)
    return %Y, %Y_h
}

graph rnn-cell-1 (
    %H_tminus1[FLOAT, tensor]
    %X_t[FLOAT, tensor]
) {
    %Wi = …
    %Ri = …
    %Wbi = …
    %Rbi = …
    %t1 = X_t * (Wi^T)
    %t2 = H_tminus1*(Ri^T)
    %t3 = Add(%t1, %t2)
    %t4 = Add(%t3, %Wbi)
    %t5 = Add(%t4, %Rbi)
    %Ht = Tanh(%t5)
    %Accumulate = Identity(%Ht)
    return %Ht, %Accumulate
}
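
Beyond the textual graph above, a Scan node can also be assembled programmatically. The following is a minimal sketch (not taken from the skl2onnx examples) that uses onnx.helper to build a Scan-16 node whose body accumulates a running sum over one scan input; the tensor names, shapes and the cumulative-sum body are illustrative assumptions.

import onnx
from onnx import TensorProto, helper

# body graph: one state variable and one scan-input element in,
# one updated state and one scan-output element out
body = helper.make_graph(
    [
        helper.make_node("Add", ["state_in", "x_t"], ["state_out"]),
        helper.make_node("Identity", ["state_out"], ["out_t"]),
    ],
    "cumsum-body",
    [
        helper.make_tensor_value_info("state_in", TensorProto.FLOAT, [2]),
        helper.make_tensor_value_info("x_t", TensorProto.FLOAT, [2]),
    ],
    [
        helper.make_tensor_value_info("state_out", TensorProto.FLOAT, [2]),
        helper.make_tensor_value_info("out_t", TensorProto.FLOAT, [2]),
    ],
)

scan = helper.make_node("Scan", ["init", "X"], ["final", "Y"],
                        body=body, num_scan_inputs=1)

graph = helper.make_graph(
    [scan], "scan-cumsum",
    [helper.make_tensor_value_info("init", TensorProto.FLOAT, [2]),
     helper.make_tensor_value_info("X", TensorProto.FLOAT, [4, 2])],
    [helper.make_tensor_value_info("final", TensorProto.FLOAT, [2]),
     helper.make_tensor_value_info("Y", TensorProto.FLOAT, [4, 2])],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 16)])
onnx.checker.check_model(model)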

Attributes

  • body: The graph run in each iteration. It takes the current values of the state_variables and the current scan_input elements, and returns the updated state_variables followed by the scan_output elements.

  • num_scan_inputs: The number M of scan_inputs.

  • scan_input_axes: Optional list giving the axis to be scanned for each scan_input; if omitted, every scan_input is scanned along axis 0.

  • scan_input_directions: Optional list giving the direction (forward or backward) in which each scan_input is scanned; if omitted, all inputs are scanned in the forward direction.

  • scan_output_axes: Optional list giving the axis along which each scan_output is accumulated; if omitted, axis 0 is used.

  • scan_output_directions: Optional list specifying, for each scan_output, whether the scan_output_element is appended or prepended in each iteration; if omitted, elements are appended.

Inputs

Between 1 and 2147483647 inputs.

  • initial_state_and_scan_inputs (variadic) V: Initial values of the loop’s N state variables followed by M scan_inputs

Outputs

Between 1 and 2147483647 outputs.

  • final_state_and_scan_outputs (variadic) V: Final values of the loop’s N state variables followed by K scan_outputs

Type Constraints

  • I tensor(int64): Int64 tensor

  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

OnnxScan_8#

class skl2onnx.algebra.onnx_ops.OnnxScan_8(*args, **kwargs)#

Version

Onnx name: Scan

This version of the operator has been available since version 8.

Summary

Scan can be used to iterate over one or more scan_input tensors, constructing zero or more scan_output tensors. It combines ideas from general recurrences, functional programming constructs such as scan, fold, map, and zip, and is intended to enable generalizations of RNN-like constructs for sequence-to-sequence processing. Other tensors (referred to as state_variables here) can be used to carry a state when iterating from one element to another (similar to hidden-state in RNNs, also referred to as loop-carried dependences in the context of loops). All these tensors are required to have the same shape in each iteration of the loop (a restriction imposed to enable efficient memory allocation). Many common usages involve a single scan_input tensor (where functionality similar to scan, fold and map can be obtained). When more than one scan_input is used, a behavior similar to zip is obtained.

The attribute body must be a graph, specifying the computation to be performed in every iteration. It takes as input the current values of the state_variables and the current iterated element of the scan_inputs. It must return the (updated) values of the state_variables and zero or more scan_output_element tensors. The values of the scan_output_element tensors are concatenated over all the iterations to produce the scan_output values of the scan construct (similar to the concatenated intermediate hidden-state values of RNN-like constructs).

The scan operation returns the final values of the state_variables as well as the scan_outputs.

The operation supports batching, and the batch-axis is required to be 0. When multiple scan_input tensors are used, they must all have the same batch-size, and they must all have the same maximum-sequence-length (the dimensionality of the sequence axis or scan axis). The sequence axis or scan axis is required to be 1.

The operation has an optional sequence_lens input (of shape [BATCH_SIZE]) to allow variable length sequences of length <= the maximum-sequence-length. If this input is not specified, all sequences are assumed to be of length equal to maximum-sequence-length. For variable length input sequences, the scan_outputs will consist of a sequence of same length as the input, padded to the maximum-sequence-length.

The optional attribute directions can be used to scan a sequence in the reverse direction. If this attribute is omitted, all sequences are scanned in the forward direction. A bidirectional scan may be performed by specifying the same tensor input twice in the scan_inputs, once with a forward direction, and once with a backward direction.

Note that because of the ONNX restriction that only the last parameter of an operator can be variadic, the initial-states and scan-inputs are listed together as one input parameter. Similarly, the final-states and scan-outputs are listed together as one output parameter. The attribute num_scan_inputs indicates the number M of scan-inputs.

The behavior of

Scan <
    num_scan_inputs = m,
    body = loop-body
> (sequence_lengths, init_1, …, init_n, scan_1, …, scan_m)

is equivalent to the following pseudo-code:

// T.shape[0] denotes the batch-size of T
// The batch-size of scan_1, …, scan_m are all required to be equal
batch_size = scan_1.shape[0];

// scan_i.shape[1] denotes the (max) sequence-length of scan_i
// scan_i.shape[1] is required to be equal to scan_j.shape[1] for all i,j.
max_sequence_length = scan_1.shape[1];

for (int batch = 0; batch < batch_size; ++batch) {
    // initialize state-variables
    st_1 = init_1; … st_n = init_n;
    // initialize scan-output variables: [] denotes an empty tensor
    scan_out_1 = []; …; scan_out_k = [];
    // identify number of iterations:
    N = (sequence_lengths specified) ? sequence_lengths[batch] : max_sequence_length;

    // execute loop
    for (int t = 0; t < N; ++t) {
        // generate the scan-input elements: the notation T<axis=k>[t] indicates the sub-tensor
        // of rank one less than T obtained by indexing T at position t along axis k.
        si_1 = (scan_1<axis=0>[batch])<axis=1>[t];
        … ;
        si_m = (scan_m<axis=0>[batch])<axis=1>[t];
        // execute loop-body
        st_1, …, st_n, so_1, …, so_k = loop-body(st_1, …, st_n, si_1, …, si_m)
        // accumulate the scan-output elements
        scan_out_1 = Concat<axis=0>(scan_out_1, so_1); … ; scan_out_k = Concat<axis=0>(scan_out_k, so_k);
    }
    // accumulate the outputs for this batch:
    bst_1[batch] = st_1; …, bst_n[batch] = st_n;
    // Note scan-outputs will have size max_sequence_length, but only first N values will be meaningful.
    // The remaining values have an undefined value.
    b_scan_out_1[batch] = scan_out_1; …; b_scan_out_k[batch] = scan_out_k;
}

return bst_1, …, bst_n, b_scan_out_1, …, b_scan_out_k;
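
As a concrete illustration of these batched semantics, here is a small NumPy sketch (not part of the specification) for one state variable and one scan input, with a running-sum loop body; names, shapes and the zero padding beyond each sequence length are illustrative choices.

import numpy as np

def scan_v8_running_sum(h0, x, sequence_lens=None):
    # h0: [batch, state], x: [batch, max_seq, state], sequence_lens: [batch] or None
    batch_size, max_len = x.shape[0], x.shape[1]
    lens = sequence_lens if sequence_lens is not None else [max_len] * batch_size
    final = h0.copy()
    scan_out = np.zeros_like(x)        # padded to max_seq for variable-length batches
    for b in range(batch_size):
        st = h0[b]
        for t in range(lens[b]):
            st = st + x[b, t]          # loop body: update the state variable
            scan_out[b, t] = st        # scan_output element for this step
        final[b] = st
    return final, scan_out

h, y = scan_v8_running_sum(np.zeros((2, 3)), np.ones((2, 4, 3)), sequence_lens=[4, 2])
# h[0] -> [4., 4., 4.], h[1] -> [2., 2., 2.]; y is padded beyond each sequence length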

Sample usage: Encoding RNN using a Scan

The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes %Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these values are computed in the outer graph, they need to be passed in as extra state_variables.

graph rnn-encoding {
    %H_0 = …
    %X = …
    %Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1]("", %H_0, %X)
    return %Y, %Y_h
}

graph rnn-cell-1 (
    %H_tminus1[FLOAT, tensor]
    %X_t[FLOAT, tensor]
) {
    %Wi = …
    %Ri = …
    %Wbi = …
    %Rbi = …
    %t1 = X_t * (Wi^T)
    %t2 = H_tminus1*(Ri^T)
    %t3 = Add(%t1, %t2)
    %t4 = Add(%t3, %Wbi)
    %t5 = Add(%t4, %Rbi)
    %Ht = Tanh(%t5)
    %Accumulate = Identity(%Ht)
    return %Ht, %Accumulate
}

Attributes

  • body: The graph run in each iteration. It takes the current values of the state_variables and the current scan_input elements, and returns the updated state_variables followed by the scan_output elements.

  • directions: Optional list of M flags, one per scan_input, specifying whether the corresponding input is scanned in the forward or the reverse direction; if omitted, all inputs are scanned in the forward direction.

  • num_scan_inputs: The number M of scan_inputs.

Inputs

Between 2 and 2147483647 inputs.

  • sequence_lens (optional, heterogeneous) I: Optional tensor specifying lengths of the sequences in a batch. If this input is not specified, all sequences are assumed to be of the maximum sequence length (the dimension of the sequence axis of the scan_input tensors).

  • initial_state_and_scan_inputs (variadic) V: Initial values of the loop’s N state variables followed by M scan_inputs

Outputs

Between 1 and 2147483647 outputs.

  • final_state_and_scan_outputs (variadic) V: Final values of the loop’s N state variables followed by K scan_outputs

Type Constraints

  • I tensor(int64): Int64 tensor

  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

OnnxScan_9#

class skl2onnx.algebra.onnx_ops.OnnxScan_9(*args, **kwargs)#

Version

Onnx name: Scan

This version of the operator has been available since version 9.

Summary

Scan can be used to iterate over one or more scan_input tensors, constructing zero or more scan_output tensors. It combines ideas from general recurrences, functional programming constructs such as scan, fold, map, and zip, and is intended to enable generalizations of RNN-like constructs for sequence-to-sequence processing. Other tensors (referred to as state_variables here) can be used to carry a state when iterating from one element to another (similar to hidden-state in RNNs, also referred to as loop-carried dependences in the context of loops). Many common usages involve a single scan_input tensor (where functionality similar to scan, fold and map can be obtained). When more than one scan_input is used, a behavior similar to zip is obtained.

The attribute body must be a graph, specifying the computation to be performed in every iteration. It takes as input the current values of the state_variables and the current iterated element of the scan_inputs. It must return the (updated) values of the state_variables and zero or more scan_output_element tensors. The values of the scan_output_element tensors are concatenated over all the iterations to produce the scan_output values of the scan construct (similar to the concatenated intermediate hidden-state values of RNN-like constructs). All the output tensors (state_variables as well as scan_output_element tensors) are required to have the same shape in each iteration of the loop (a restriction imposed to enable efficient memory allocation).

Note that the iterated element passed to the body subgraph does not have a sequence axis. It will have a rank one less than the rank of the corresponding scan_input.

The scan operation returns the final values of the state_variables as well as the scan_outputs.

The optional attribute scan_input_directions specifies the direction (forward or backward) for each scan input. If this attribute is omitted, all sequences are scanned in the forward direction. A bidirectional scan may be performed by specifying the same tensor input twice in the scan_inputs, once with a forward direction, and once with a backward direction.

The scan_output of the operation is produced by concatenating the scan_output_element values produced by the body in each iteration. The optional attribute scan_output_directions specifies the direction in which scan_output is constructed (by appending or prepending the scan_output_element to scan_output in each iteration) for each scan_output. If this attribute is omitted, the scan_output_element is appended to the scan_output in each iteration.

The optional attribute scan_input_axes specifies the axis to be scanned for each scan_input. If omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the batch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1. Note that scanning a non-zero axis may be less efficient than scanning axis zero.

The optional attribute scan_output_axes specifies the axis along which the scan_outputs are accumulated for each scan_output. For example, if axis 1 is the time axis (to be scanned) for both inputs and outputs, specify a scan_input axis and scan_output axis value of 1.

Note that because of the ONNX restriction that only the last parameter of an operator can be variadic, the initial-states and scan-inputs are listed together as one input parameter. Similarly, the final-states and scan-outputs are listed together as one output parameter. The attribute num_scan_inputs indicates the number M of scan-inputs.

The behavior of

Scan <
    num_scan_inputs = m,
    body = loop-body,
    scan_input_axes = [axis_1, …, axis_m]
> (init_1, …, init_n, scan_1, …, scan_m)

is equivalent to the following pseudo-code:

// scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i
// scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.
sequence_length = scan_1.shape[axis_1];

// initialize state-variables
st_1 = init_1; … st_n = init_n;
// initialize scan-output variables: [] denotes an empty tensor
scan_out_1 = []; …; scan_out_k = [];
// identify number of iterations:

// execute loop
for (int t = 0; t < sequence_length; ++t) {
    // generate the scan-input elements: the notation T<axis=k>[t] indicates the sub-tensor
    // of rank one less than T obtained by indexing T at position t along axis k.
    si_1 = scan_1<axis=axis_1>[t];
    … ;
    si_m = scan_m<axis=axis_m>[t];
    // execute loop-body
    st_1, …, st_n, so_1, …, so_k = loop-body(st_1, …, st_n, si_1, …, si_m)
    // accumulate the scan-output elements
    scan_out_1 = Concat<axis=0>(scan_out_1, so_1); … ; scan_out_k = Concat<axis=0>(scan_out_k, so_k);
}

return st_1, …, st_n, scan_out_1, …, scan_out_k;

Sample usage: Encoding RNN using a Scan

The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes %Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these values are computed in the outer graph, they need to be passed in as extra state_variables.

graph rnn-encoding {
    %H_0 = …
    %X = …
    %Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1](%H_0, %X)
    return %Y, %Y_h
}

graph rnn-cell-1 (
    %H_tminus1[FLOAT, tensor]
    %X_t[FLOAT, tensor]
) {
    %Wi = …
    %Ri = …
    %Wbi = …
    %Rbi = …
    %t1 = X_t * (Wi^T)
    %t2 = H_tminus1*(Ri^T)
    %t3 = Add(%t1, %t2)
    %t4 = Add(%t3, %Wbi)
    %t5 = Add(%t4, %Rbi)
    %Ht = Tanh(%t5)
    %Accumulate = Identity(%Ht)
    return %Ht, %Accumulate
}

Attributes

  • body: The graph run in each iteration. It takes the current values of the state_variables and the current scan_input elements, and returns the updated state_variables followed by the scan_output elements.

  • num_scan_inputs: The number M of scan_inputs.

  • scan_input_axes: Optional list giving the axis to be scanned for each scan_input; if omitted, every scan_input is scanned along axis 0.

  • scan_input_directions: Optional list giving the direction (forward or backward) in which each scan_input is scanned; if omitted, all inputs are scanned in the forward direction.

  • scan_output_axes: Optional list giving the axis along which each scan_output is accumulated; if omitted, axis 0 is used.

  • scan_output_directions: Optional list specifying, for each scan_output, whether the scan_output_element is appended or prepended in each iteration; if omitted, elements are appended.

Inputs

Between 1 and 2147483647 inputs.

  • initial_state_and_scan_inputs (variadic) V: Initial values of the loop’s N state variables followed by M scan_inputs

Outputs

Between 1 and 2147483647 outputs.

  • final_state_and_scan_outputs (variadic) V: Final values of the loop’s N state variables followed by K scan_outputs

Type Constraints

  • I tensor(int64): Int64 tensor

  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

OnnxScatter#

class skl2onnx.algebra.onnx_ops.OnnxScatter(*args, **kwargs)#

Version

Onnx name: Scatter

This version of the operator has been deprecated since version 11.

Summary

This operator is deprecated. Please use ScatterElements, which provides the same functionality.

Scatter takes three inputs data, updates, and indices of the same rank r >= 1 and an optional attribute axis that identifies an axis of data (by default, the outer-most axis, that is axis 0). The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data.

For each entry in updates, the target index in data is obtained by combining the corresponding entry in indices with the index of the entry itself: the index-value for dimension = axis is obtained from the value of the corresponding entry in indices and the index-value for dimension != axis is obtained from the index of the entry itself.

For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry is performed as below:

output[indices[i][j]][j] = updates[i][j] if axis = 0,
output[i][indices[i][j]] = updates[i][j] if axis = 1,

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.

Example 1:

data = [
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
]
indices = [
    [1, 0, 2],
    [0, 2, 1],
]
updates = [
    [1.0, 1.1, 1.2],
    [2.0, 2.1, 2.2],
]
output = [
    [2.0, 1.1, 0.0],
    [1.0, 0.0, 2.2],
    [0.0, 2.1, 1.2],
]

Example 2:

data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
indices = [[1, 3]]
updates = [[1.1, 2.1]]
axis = 1
output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
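
The update rule above is easy to check directly in NumPy; the following short sketch (illustrative only, not part of the operator definition) reproduces Example 2 with axis = 1.

import numpy as np

data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]])
indices = np.array([[1, 3]])
updates = np.array([[1.1, 2.1]])

output = data.copy()
for i in range(indices.shape[0]):
    for j in range(indices.shape[1]):
        # axis = 1: the column comes from indices, the row from the entry itself
        output[i, indices[i, j]] = updates[i, j]

print(output)   # [[1.  1.1 3.  2.1 5. ]]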

Attributes

  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is name: "axis" i: 0 type: INT

Inputs

  • data (heterogeneous) T: Tensor of rank r >= 1.

  • indices (heterogeneous) Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • updates (heterogeneous) T: Tensor of rank r >= 1 (same rank and shape as indices)

Outputs

  • output (heterogeneous) T: Tensor of rank r >= 1 (same rank as input).

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

OnnxScatterElements#

class skl2onnx.algebra.onnx_ops.OnnxScatterElements(*args, **kwargs)#

Version

Onnx name: ScatterElements

This version of the operator has been available since version 16.

Summary

ScatterElements takes three inputs data, updates, and indices of the same rank r >= 1 and an optional attribute axis that identifies an axis of data (by default, the outer-most axis, that is axis 0). The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data.

For each entry in updates, the target index in data is obtained by combining the corresponding entry in indices with the index of the entry itself: the index-value for dimension = axis is obtained from the value of the corresponding entry in indices and the index-value for dimension != axis is obtained from the index of the entry itself.

reduction allows specification of an optional reduction operation, which is applied to all values in updates tensor into output at the specified indices. In cases where reduction is set to “none”, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry is performed as below:

output[indices[i][j]][j] = updates[i][j] if axis = 0,
output[i][indices[i][j]] = updates[i][j] if axis = 1,

When reduction is set to “add”, the update corresponding to the [i][j] entry is performed as below:

output[indices[i][j]][j] += updates[i][j] if axis = 0,
output[i][indices[i][j]] += updates[i][j] if axis = 1,

When reduction is set to “mul”, the update corresponding to the [i][j] entry is performed as below:

output[indices[i][j]][j] *= updates[i][j] if axis = 0,
output[i][indices[i][j]] *= updates[i][j] if axis = 1,

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.

Example 1:

data = [
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
]
indices = [
    [1, 0, 2],
    [0, 2, 1],
]
updates = [
    [1.0, 1.1, 1.2],
    [2.0, 2.1, 2.2],
]
output = [
    [2.0, 1.1, 0.0],
    [1.0, 0.0, 2.2],
    [0.0, 2.1, 1.2],
]

Example 2:

data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
indices = [[1, 3]]
updates = [[1.1, 2.1]]
axis = 1
output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
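
As a usage sketch of this class (hypothetical values, assuming onnxruntime is installed), the graph below reproduces Example 1: the input name 'data' and the output name 'output' are arbitrary choices, and indices and updates are embedded as constants.

import numpy as np
from onnxruntime import InferenceSession
from skl2onnx.algebra.onnx_ops import OnnxScatterElements

data = np.zeros((3, 3), dtype=np.float32)
indices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)
updates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)

# indices and updates are passed as constants, data stays a graph input
node = OnnxScatterElements('data', indices, updates, axis=0,
                           op_version=16, output_names=['output'])
onx = node.to_onnx({'data': data}, target_opset=16)

sess = InferenceSession(onx.SerializeToString(),
                        providers=["CPUExecutionProvider"])
print(sess.run(None, {'data': data})[0])
# [[2.  1.1 0. ]
#  [1.  0.  2.2]
#  [0.  2.1 1.2]]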

Attributes

  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is name: "axis" i: 0 type: INT

  • reduction: Type of reduction to apply: none (default), add, mul. ‘none’: no reduction applied. ‘add’: reduction using the addition operation. ‘mul’: reduction using the multiplication operation. Default value is name: "reduction" s: "none" type: STRING

Inputs

  • data (heterogeneous) T: Tensor of rank r >= 1.

  • indices (heterogeneous) Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • updates (heterogeneous) T: Tensor of rank r >= 1 (same rank and shape as indices)

Outputs

  • output (heterogeneous) T: Tensor of rank r >= 1 (same rank as input).

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

OnnxScatterElements_11#

class skl2onnx.algebra.onnx_ops.OnnxScatterElements_11(*args, **kwargs)#

Version

Onnx name: ScatterElements

This version of the operator has been available since version 11.

Summary

ScatterElements takes three inputs data, updates, and indices of the same rank r >= 1 and an optional attribute axis that identifies an axis of data (by default, the outer-most axis, that is axis 0). The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data.

For each entry in updates, the target index in data is obtained by combining the corresponding entry in indices with the index of the entry itself: the index-value for dimension = axis is obtained from the value of the corresponding entry in indices and the index-value for dimension != axis is obtained from the index of the entry itself.

For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry is performed as below:

output[indices[i][j]][j] = updates[i][j] if axis = 0,
output[i][indices[i][j]] = updates[i][j] if axis = 1,

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.

Example 1:

data = [
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
]
indices = [
    [1, 0, 2],
    [0, 2, 1],
]
updates = [
    [1.0, 1.1, 1.2],
    [2.0, 2.1, 2.2],
]
output = [
    [2.0, 1.1, 0.0],
    [1.0, 0.0, 2.2],
    [0.0, 2.1, 1.2],
]

Example 2:

data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
indices = [[1, 3]]
updates = [[1.1, 2.1]]
axis = 1
output = [[1.0, 1.1, 3.0, 2.1, 5.0]]

Attributes

  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is name: "axis" i: 0 type: INT

Inputs

  • data (heterogeneous) T: Tensor of rank r >= 1.

  • indices (heterogeneous) Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • updates (heterogeneous) T: Tensor of rank r >= 1 (same rank and shape as indices)

Outputs

  • output (heterogeneous) T: Tensor of rank r >= 1 (same rank as input).

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

OnnxScatterElements_13#

class skl2onnx.algebra.onnx_ops.OnnxScatterElements_13(*args, **kwargs)#

Version

Onnx name: ScatterElements

This version of the operator has been available since version 13.

Summary

ScatterElements takes three inputs data, updates, and indices of the same rank r >= 1 and an optional attribute axis that identifies an axis of data (by default, the outer-most axis, that is axis 0). The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data.

For each entry in updates, the target index in data is obtained by combining the corresponding entry in indices with the index of the entry itself: the index-value for dimension = axis is obtained from the value of the corresponding entry in indices and the index-value for dimension != axis is obtained from the index of the entry itself.

For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry is performed as below:

output[indices[i][j]][j] = updates[i][j] if axis = 0,
output[i][indices[i][j]] = updates[i][j] if axis = 1,

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.

Example 1:

data = [
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
]
indices = [
    [1, 0, 2],
    [0, 2, 1],
]
updates = [
    [1.0, 1.1, 1.2],
    [2.0, 2.1, 2.2],
]
output = [
    [2.0, 1.1, 0.0],
    [1.0, 0.0, 2.2],
    [0.0, 2.1, 1.2],
]

Example 2:

data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
indices = [[1, 3]]
updates = [[1.1, 2.1]]
axis = 1
output = [[1.0, 1.1, 3.0, 2.1, 5.0]]

Attributes

  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is name: "axis" i: 0 type: INT

Inputs

  • data (heterogeneous) T: Tensor of rank r >= 1.

  • indices (heterogeneous) Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • updates (heterogeneous) T: Tensor of rank r >= 1 (same rank and shape as indices)

Outputs

  • output (heterogeneous) T: Tensor of rank r >= 1 (same rank as input).

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

OnnxScatterElements_16#

class skl2onnx.algebra.onnx_ops.OnnxScatterElements_16(*args, **kwargs)#

Version

Onnx name: ScatterElements

This version of the operator has been available since version 16.

Summary

ScatterElements takes three inputs data, updates, and indices of the same rank r >= 1 and an optional attribute axis that identifies an axis of data (by default, the outer-most axis, that is axis 0). The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data.

For each entry in updates, the target index in data is obtained by combining the corresponding entry in indices with the index of the entry itself: the index-value for dimension = axis is obtained from the value of the corresponding entry in indices and the index-value for dimension != axis is obtained from the index of the entry itself.

reduction allows specification of an optional reduction operation, which is applied to all values in updates tensor into output at the specified indices. In cases where reduction is set to “none”, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry is performed as below:

output[indices[i][j]][j] = updates[i][j] if axis = 0,
output[i][indices[i][j]] = updates[i][j] if axis = 1,

When reduction is set to “add”, the update corresponding to the [i][j] entry is performed as below:

output[indices[i][j]][j] += updates[i][j] if axis = 0,
output[i][indices[i][j]] += updates[i][j] if axis = 1,

When reduction is set to “mul”, the update corresponding to the [i][j] entry is performed as below:

output[indices[i][j]][j] *= updates[i][j] if axis = 0,
output[i][indices[i][j]] *= updates[i][j] if axis = 1,

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.

Example 1:

data = [
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
]
indices = [
    [1, 0, 2],
    [0, 2, 1],
]
updates = [
    [1.0, 1.1, 1.2],
    [2.0, 2.1, 2.2],
]
output = [
    [2.0, 1.1, 0.0],
    [1.0, 0.0, 2.2],
    [0.0, 2.1, 1.2],
]

Example 2:

data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
indices = [[1, 3]]
updates = [[1.1, 2.1]]
axis = 1
output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
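
To illustrate the reduction behavior described above, here is a small NumPy sketch (illustrative only) of reduction="add" with axis = 0; note the duplicate index in the first column, which is allowed because the updates accumulate.

import numpy as np

data = np.zeros((3, 3))
indices = np.array([[1, 0, 2], [1, 2, 1]])   # column 0 targets row 1 twice
updates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]])

output = data.copy()
for i in range(indices.shape[0]):
    for j in range(indices.shape[1]):
        output[indices[i, j], j] += updates[i, j]   # axis = 0, reduction = "add"

print(output)
# [[0.  1.1 0. ]
#  [3.  0.  2.2]
#  [0.  2.1 1.2]]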

Attributes

  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is name: "axis" i: 0 type: INT

  • reduction: Type of reduction to apply: none (default), add, mul. ‘none’: no reduction applied. ‘add’: reduction using the addition operation. ‘mul’: reduction using the multiplication operation. Default value is name: "reduction" s: "none" type: STRING

Inputs

  • data (heterogeneous) T: Tensor of rank r >= 1.

  • indices (heterogeneous) Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • updates (heterogeneous) T: Tensor of rank r >= 1 (same rank and shape as indices)

Outputs

  • output (heterogeneous) T: Tensor of rank r >= 1 (same rank as input).

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

OnnxScatterND#

class skl2onnx.algebra.onnx_ops.OnnxScatterND(*args, **kwargs)#

Version

Onnx name: ScatterND

This version of the operator has been available since version 16.

Summary

ScatterND takes three inputs data tensor of rank r >= 1, indices tensor of rank q >= 1, and updates tensor of rank q + r - indices.shape[-1] - 1. The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data. Note that indices should not have duplicate entries. That is, two or more updates for the same index-location is not supported.

indices is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of indices. indices is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into data.

Hence, k can be a value at most the rank of data. When k equals rank(data), each update entry specifies an update to a single element of the tensor. When k is less than rank(data) each update entry specifies an update to a slice of the tensor.

updates is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. The remaining dimensions of updates correspond to the dimensions of the replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, corresponding to the trailing (r-k) dimensions of data. Thus, the shape of updates must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation of shapes.

The output is calculated via the following equation:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] = updates[idx]

The order of iteration in the above loop is not specified. In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order.

reduction allows specification of an optional reduction operation, which is applied to all values in updates tensor into output at the specified indices. In cases where reduction is set to “none”, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order. When reduction is set to “add”, output is calculated as follows:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] += updates[idx]

When reduction is set to “mul”, output is calculated as follows:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] *= updates[idx]

This operator is the inverse of GatherND.

Example 1:

data    = [1, 2, 3, 4, 5, 6, 7, 8]
indices = [[4], [3], [1], [7]]
updates = [9, 10, 11, 12]
output  = [1, 11, 3, 10, 9, 6, 7, 12]

Example 2:

data    = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
indices = [[0], [2]]
updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]
output  = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
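
The equations above translate almost directly into NumPy; the following minimal sketch (illustrative only) applies the reference loop with reduction left at "none" and reproduces Example 1.

import numpy as np

def scatter_nd(data, indices, updates):
    # reference loop from the equation above (reduction = "none")
    output = np.copy(data)
    for idx in np.ndindex(indices.shape[:-1]):
        output[tuple(indices[idx])] = updates[idx]
    return output

data = np.array([1, 2, 3, 4, 5, 6, 7, 8])
indices = np.array([[4], [3], [1], [7]])
updates = np.array([9, 10, 11, 12])
print(scatter_nd(data, indices, updates))   # [ 1 11  3 10  9  6  7 12]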

Attributes

  • reduction: Type of reduction to apply: none (default), add, mul. ‘none’: no reduction applied. ‘add’: reduction using the addition operation. ‘mul’: reduction using the multiplication operation. Default value is name: "reduction" s: "none" type: STRING

Inputs

  • data (heterogeneous) T: Tensor of rank r >= 1.

  • indices (heterogeneous) tensor(int64): Tensor of rank q >= 1.

  • updates (heterogeneous) T: Tensor of rank q + r - indices_shape[-1] - 1.

Outputs

  • output (heterogeneous) T: Tensor of rank r >= 1.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

OnnxScatterND_11#

class skl2onnx.algebra.onnx_ops.OnnxScatterND_11(*args, **kwargs)#

Version

Onnx name: ScatterND

This version of the operator has been available since version 11.

Summary

ScatterND takes three inputs data tensor of rank r >= 1, indices tensor of rank q >= 1, and updates tensor of rank q + r - indices.shape[-1] - 1. The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data. Note that indices should not have duplicate entries. That is, two or more updates for the same index-location is not supported.

indices is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of indices. indices is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into data.

Hence, k can be a value at most the rank of data. When k equals rank(data), each update entry specifies an update to a single element of the tensor. When k is less than rank(data) each update entry specifies an update to a slice of the tensor.

updates is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. The remaining dimensions of updates correspond to the dimensions of the replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, corresponding to the trailing (r-k) dimensions of data. Thus, the shape of updates must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation of shapes.

The output is calculated via the following equation:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] = updates[idx]

The order of iteration in the above loop is not specified. In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order.

This operator is the inverse of GatherND.

Example 1:

data    = [1, 2, 3, 4, 5, 6, 7, 8]
indices = [[4], [3], [1], [7]]
updates = [9, 10, 11, 12]
output  = [1, 11, 3, 10, 9, 6, 7, 12]

Example 2:

data    = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
indices = [[0], [2]]
updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]
output  = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]

Inputs

  • data (heterogeneous) T: Tensor of rank r >= 1.

  • indices (heterogeneous) tensor(int64): Tensor of rank q >= 1.

  • updates (heterogeneous) T: Tensor of rank q + r - indices_shape[-1] - 1.

Outputs

  • output (heterogeneous) T: Tensor of rank r >= 1.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

OnnxScatterND_13#

class skl2onnx.algebra.onnx_ops.OnnxScatterND_13(*args, **kwargs)#

Version

Onnx name: ScatterND

This version of the operator has been available since version 13.

Summary

ScatterND takes three inputs data tensor of rank r >= 1, indices tensor of rank q >= 1, and updates tensor of rank q + r - indices.shape[-1] - 1. The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data. Note that indices should not have duplicate entries. That is, two or more updates for the same index-location is not supported.

indices is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of indices. indices is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into data.

Hence, k can be a value at most the rank of data. When k equals rank(data), each update entry specifies an update to a single element of the tensor. When k is less than rank(data) each update entry specifies an update to a slice of the tensor.

updates is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. The remaining dimensions of updates correspond to the dimensions of the replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, corresponding to the trailing (r-k) dimensions of data. Thus, the shape of updates must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation of shapes.

The output is calculated via the following equation:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] = updates[idx]

The order of iteration in the above loop is not specified. In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order.

This operator is the inverse of GatherND.

Example 1:

data    = [1, 2, 3, 4, 5, 6, 7, 8]
indices = [[4], [3], [1], [7]]
updates = [9, 10, 11, 12]
output  = [1, 11, 3, 10, 9, 6, 7, 12]

Example 2:

data    = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
indices = [[0], [2]]
updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]
output  = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]

Inputs

  • data (heterogeneous) T: Tensor of rank r >= 1.

  • indices (heterogeneous) tensor(int64): Tensor of rank q >= 1.

  • updates (heterogeneous) T: Tensor of rank q + r - indices_shape[-1] - 1.

Outputs

  • output (heterogeneous) T: Tensor of rank r >= 1.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

OnnxScatterND_16#

class skl2onnx.algebra.onnx_ops.OnnxScatterND_16(*args, **kwargs)#

Version

Onnx name: ScatterND

This version of the operator has been available since version 16.

Summary

ScatterND takes three inputs data tensor of rank r >= 1, indices tensor of rank q >= 1, and updates tensor of rank q + r - indices.shape[-1] - 1. The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data. Note that indices should not have duplicate entries. That is, two or more updates for the same index-location is not supported.

indices is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of indices. indices is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into data.

Hence, k can be a value at most the rank of data. When k equals rank(data), each update entry specifies an update to a single element of the tensor. When k is less than rank(data) each update entry specifies an update to a slice of the tensor.

updates is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. The remaining dimensions of updates correspond to the dimensions of the replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, corresponding to the trailing (r-k) dimensions of data. Thus, the shape of updates must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation of shapes.

The output is calculated via the following equation:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] = updates[idx]

The order of iteration in the above loop is not specified. In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order.

reduction allows specification of an optional reduction operation, which is applied to all values in updates tensor into output at the specified indices. In cases where reduction is set to “none”, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order. When reduction is set to “add”, output is calculated as follows:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] += updates[idx]

When reduction is set to “mul”, output is calculated as follows:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] *= updates[idx]

This operator is the inverse of GatherND.

Example 1:

data    = [1, 2, 3, 4, 5, 6, 7, 8]
indices = [[4], [3], [1], [7]]
updates = [9, 10, 11, 12]
output  = [1, 11, 3, 10, 9, 6, 7, 12]

Example 2:

data    = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
indices = [[0], [2]]
updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]
output  = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]

Attributes

  • reduction: Type of reduction to apply: none (default), add, mul. ‘none’: no reduction applied. ‘add’: reduction using the addition operation. ‘mul’: reduction using the multiplication operation. Default value is name: "reduction" s: "none" type: STRING

Inputs

  • data (heterogeneous) T: Tensor of rank r >= 1.
  • indices (heterogeneous) tensor(int64): Tensor of rank q >= 1.
  • updates (heterogeneous) T: Tensor of rank q + r - indices_shape[-1] - 1.

Outputs

  • output (heterogeneous) T: Tensor of rank r >= 1.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

OnnxScatter_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxScatter_11(*args, **kwargs)#
-

Version

-

Onnx name: Scatter

-

This version of the operator has been deprecated since version 11.

-

Summary

-

This operator is deprecated. Please use ScatterElements, which provides the same functionality.

-

Scatter takes three inputs data, updates, and indices of the same rank r >= 1 and an optional attribute axis that identifies an axis of data (by default, the outer-most axis, that is axis 0). The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data.

-

For each entry in updates, the target index in data is obtained by combining the corresponding entry in indices with the index of the entry itself: the index-value for dimension = axis is obtained from the value of the corresponding entry in indices and the index-value for dimension != axis is obtained from the index of the entry itself.

-

For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry -is performed as below:

output[indices[i][j]][j] = updates[i][j] if axis = 0,
output[i][indices[i][j]] = updates[i][j] if axis = 1,

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.
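The per-element update rule above can be checked with a small NumPy sketch; the helper name scatter_elements is illustrative only, not part of skl2onnx or onnxruntime. It reproduces Example 1 below.

import numpy as np

def scatter_elements(data, indices, updates, axis=0):
    # Replace the index along `axis` by the value taken from `indices`.
    output = np.copy(data)
    for idx in np.ndindex(updates.shape):
        target = list(idx)
        target[axis] = indices[idx]
        output[tuple(target)] = updates[idx]
    return output

data = np.zeros((3, 3))
indices = np.array([[1, 0, 2], [0, 2, 1]])
updates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]])
print(scatter_elements(data, indices, updates, axis=0))
# [[2.  1.1 0. ]
#  [1.  0.  2.2]
#  [0.  2.1 1.2]]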

-

Example 1:

data = [
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
]
indices = [
    [1, 0, 2],
    [0, 2, 1],
]
updates = [
    [1.0, 1.1, 1.2],
    [2.0, 2.1, 2.2],
]
output = [
    [2.0, 1.1, 0.0],
    [1.0, 0.0, 2.2],
    [0.0, 2.1, 1.2],
]

Example 2:

data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
indices = [[1, 3]]
updates = [[1.1, 2.1]]
axis = 1
output = [[1.0, 1.1, 3.0, 2.1, 5.0]]

Attributes

-
    -
  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is -name: "axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • -
  • updates (heterogeneous)T: Tensor of rank r >=1 (same rank and shape as indices)

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank r >= 1 (same rank as input).

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • -
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • -
-
- -
-
-
-
-

OnnxScatter_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxScatter_9(*args, **kwargs)#
-

Version

-

Onnx name: Scatter

-

This version of the operator has been available since -version 9.

-

Summary

-

Given data, updates and indices input tensors of rank r >= 1, write the values provided by updates into the first input, data, along the axis dimension of data (by default the outer-most one, axis=0) at the corresponding indices. For each entry in updates, the target index in data is specified by the corresponding entry in indices for dimension = axis, and by the index in the source for dimension != axis. For instance, in a 2-D tensor case, data[indices[i][j]][j] = updates[i][j] if axis = 0, or data[i][indices[i][j]] = updates[i][j] if axis = 1, where i and j are loop counters from 0 up to the respective size in updates - 1.

Example 1:

data = [
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
]
indices = [
    [1, 0, 2],
    [0, 2, 1],
]
updates = [
    [1.0, 1.1, 1.2],
    [2.0, 2.1, 2.2],
]
output = [
    [2.0, 1.1, 0.0],
    [1.0, 0.0, 2.2],
    [0.0, 2.1, 1.2],
]
Example 2:

data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
indices = [[1, 3]]
updates = [[1.1, 2.1]]
axis = 1
output = [[1.0, 1.1, 3.0, 2.1, 5.0]]


Attributes

-
    -
  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] Default value is -name: "axis" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • -
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input).

  • -
  • updates (heterogeneous)T: Tensor of rank r >=1 (same rank and shape as indices)

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Tensor of rank r >= 1 (same rank as input).

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • -
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • -
-
- -
-
-
-
-

OnnxSelu#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSelu(*args, **kwargs)#
-

Version

-

Onnx name: Selu

-

This version of the operator has been available since -version 6.

-

Summary

-

Selu takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the scaled exponential linear unit function, y = gamma * (alpha * e^x - alpha) for x <= 0, y = gamma * x for x > 0, is applied to the tensor elementwise.
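A minimal NumPy sketch of this formula, assuming the default alpha and gamma listed below:

import numpy as np

alpha, gamma = 1.6732632423543772, 1.0507009873554805

def selu(x):
    # y = gamma * x for x > 0, y = gamma * (alpha * e^x - alpha) otherwise.
    return np.where(x > 0, gamma * x, gamma * (alpha * np.exp(x) - alpha))

print(selu(np.array([-1.0, 0.0, 1.0])))   # approximately [-1.1113  0.  1.0507]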

-

Attributes

-
    -
  • alpha: Coefficient of SELU default to 1.67326319217681884765625 (i.e., float32 approximation of 1.6732632423543772848170429916717). Default value is -name: "alpha" f: 1.6732631921768188 type: FLOAT

  • -
  • gamma: Coefficient of SELU default to 1.05070102214813232421875 (i.e., float32 approximation of 1.0507009873554804934193349852946). Default value is -name: "gamma" f: 1.0507010221481323 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSelu_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSelu_1(*args, **kwargs)#
-

Version

-

Onnx name: Selu

-

This version of the operator has been available since -version 1.

-

Summary

-

Selu takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the scaled exponential linear unit function, y = gamma * (alpha * e^x - alpha) for x <= 0, y = gamma * x for x > 0, is applied to the tensor elementwise.

-

Attributes

-
    -
  • alpha: Coefficient of SELU default to 1.6732. Default value is -name: "alpha" f: 1.673200011253357 type: FLOAT

  • -
  • -
  • gamma: Coefficient of SELU default to 1.0507. Default value is -name: "gamma" f: 1.0506999492645264 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSelu_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSelu_6(*args, **kwargs)#
-

Version

-

Onnx name: Selu

-

This version of the operator has been available since -version 6.

-

Summary

-

Selu takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the scaled exponential linear unit function, y = gamma * (alpha * e^x - alpha) for x <= 0, y = gamma * x for x > 0, is applied to the tensor elementwise.

-

Attributes

-
    -
  • alpha: Coefficient of SELU default to 1.67326319217681884765625 (i.e., float32 approximation of 1.6732632423543772848170429916717). Default value is -name: "alpha" f: 1.6732631921768188 type: FLOAT

  • -
  • gamma: Coefficient of SELU default to 1.05070102214813232421875 (i.e., float32 approximation of 1.0507009873554804934193349852946). Default value is -name: "gamma" f: 1.0507010221481323 type: FLOAT

  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSequenceAt#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSequenceAt(*args, **kwargs)#
-

Version

-

Onnx name: SequenceAt

-

This version of the operator has been available since -version 11.

-

Summary

-

Outputs a tensor copy from the tensor at ‘position’ in ‘input_sequence’. Accepted range for ‘position’ is in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. Negative value means counting positions from the back.
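Conceptually, an ONNX tensor sequence behaves like a Python list of arrays, so SequenceAt can be pictured as plain list indexing. This is only a sketch of the semantics, not the runtime implementation.

import numpy as np

seq = [np.array([1, 2]), np.array([3, 4]), np.array([5, 6])]
position = -1            # negative positions count from the back
print(seq[position])     # SequenceAt(seq, -1) -> [5 6]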

-

Inputs

-
    -
  • input_sequence (heterogeneous)S: Input sequence.

  • -
  • position (heterogeneous)I: Position of the tensor in the sequence. Negative value means counting positions from the back. Accepted range in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).

  • -
-

Outputs

-
    -
  • tensor (heterogeneous)T: Output tensor at the specified position in the input sequence.

  • -
-

Type Constraints

-
    -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • -
  • I tensor(int32), tensor(int64): Constrain position to integral tensor. It must be a scalar(tensor of empty shape).

  • -
-
- -
-
-
-
-

OnnxSequenceAt_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSequenceAt_11(*args, **kwargs)#
-

Version

-

Onnx name: SequenceAt

-

This version of the operator has been available since -version 11.

-

Summary

-

Outputs a tensor copy from the tensor at ‘position’ in ‘input_sequence’. Accepted range for ‘position’ is in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. Negative value means counting positions from the back.

-

Inputs

-
    -
  • input_sequence (heterogeneous)S: Input sequence.

  • -
  • position (heterogeneous)I: Position of the tensor in the sequence. Negative value means counting positions from the back. Accepted range in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).

  • -
-

Outputs

-
    -
  • tensor (heterogeneous)T: Output tensor at the specified position in the input sequence.

  • -
-

Type Constraints

-
    -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • -
  • I tensor(int32), tensor(int64): Constrain position to integral tensor. It must be a scalar(tensor of empty shape).

  • -
-
- -
-
-
-
-

OnnxSequenceConstruct#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSequenceConstruct(*args, **kwargs)#
-

Version

-

Onnx name: SequenceConstruct

-

This version of the operator has been available since -version 11.

-

Summary

-

Construct a tensor sequence containing ‘inputs’ tensors. All tensors in ‘inputs’ must have the same data type.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • inputs (variadic, heterogeneous)T: Tensors.

  • -
-

Outputs

-
    -
  • output_sequence (heterogeneous)S: Sequence enclosing the input tensors.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input types to any tensor type.

  • -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output types to any tensor type.

  • -
-
- -
-
-
-
-

OnnxSequenceConstruct_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSequenceConstruct_11(*args, **kwargs)#
-

Version

-

Onnx name: SequenceConstruct

-

This version of the operator has been available since -version 11.

-

Summary

-

Construct a tensor sequence containing ‘inputs’ tensors. All tensors in ‘inputs’ must have the same data type.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • inputs (variadic, heterogeneous)T: Tensors.

  • -
-

Outputs

-
    -
  • output_sequence (heterogeneous)S: Sequence enclosing the input tensors.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input types to any tensor type.

  • -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output types to any tensor type.

  • -
-
- -
-
-
-
-

OnnxSequenceEmpty#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSequenceEmpty(*args, **kwargs)#
-

Version

-

Onnx name: SequenceEmpty

-

This version of the operator has been available since -version 11.

-

Summary

-

Construct an empty tensor sequence, with given data type.

-

Attributes

-
    -
  • -
-

Outputs

-
    -
  • output (heterogeneous)S: Empty sequence.

  • -
-

Type Constraints

-
    -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output types to any tensor type.

  • -
-
- -
-
-
-
-

OnnxSequenceEmpty_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSequenceEmpty_11(*args, **kwargs)#
-

Version

-

Onnx name: SequenceEmpty

-

This version of the operator has been available since -version 11.

-

Summary

-

Construct an empty tensor sequence, with given data type.

-

Attributes

-
    -
  • -
-

Outputs

-
    -
  • output (heterogeneous)S: Empty sequence.

  • -
-

Type Constraints

-
    -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output types to any tensor type.

  • -
-
- -
-
-
-
-

OnnxSequenceErase#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSequenceErase(*args, **kwargs)#
-

Version

-

Onnx name: SequenceErase

-

This version of the operator has been available since -version 11.

-

Summary

-

Outputs a tensor sequence that removes the tensor at ‘position’ from ‘input_sequence’. Accepted range for ‘position’ is in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. Negative value means counting positions from the back. ‘position’ is optional; by default it erases the last tensor from ‘input_sequence’.

-

Inputs

-

Between 1 and 2 inputs.

-
    -
  • input_sequence (heterogeneous)S: Input sequence.

  • -
  • position (optional, heterogeneous)I: Position of the tensor in the sequence. Negative value means counting positions from the back. Accepted range in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).

  • -
-

Outputs

-
    -
  • output_sequence (heterogeneous)S: Output sequence that has the tensor at the specified position removed.

  • -
-

Type Constraints

-
    -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • -
  • I tensor(int32), tensor(int64): Constrain position to integral tensor. It must be a scalar(tensor of empty shape).

  • -
-
- -
-
-
-
-

OnnxSequenceErase_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSequenceErase_11(*args, **kwargs)#
-

Version

-

Onnx name: SequenceErase

-

This version of the operator has been available since -version 11.

-

Summary

-

Outputs a tensor sequence that removes the tensor at ‘position’ from ‘input_sequence’. Accepted range for ‘position’ is in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. Negative value means counting positions from the back. ‘position’ is optional; by default it erases the last tensor from ‘input_sequence’.

-

Inputs

-

Between 1 and 2 inputs.

-
    -
  • input_sequence (heterogeneous)S: Input sequence.

  • -
  • position (optional, heterogeneous)I: Position of the tensor in the sequence. Negative value means counting positions from the back. Accepted range in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).

  • -
-

Outputs

-
    -
  • output_sequence (heterogeneous)S: Output sequence that has the tensor at the specified position removed.

  • -
-

Type Constraints

-
    -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • -
  • I tensor(int32), tensor(int64): Constrain position to integral tensor. It must be a scalar(tensor of empty shape).

  • -
-
- -
-
-
-
-

OnnxSequenceInsert#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSequenceInsert(*args, **kwargs)#
-

Version

-

Onnx name: SequenceInsert

-

This version of the operator has been available since -version 11.

-

Summary

-

Outputs a tensor sequence that inserts ‘tensor’ into ‘input_sequence’ at ‘position’. ‘tensor’ must have the same data type as ‘input_sequence’. Accepted range for ‘position’ is in [-n, n], where n is the number of tensors in ‘input_sequence’. Negative value means counting positions from the back. ‘position’ is optional; by default it inserts ‘tensor’ at the back of ‘input_sequence’.
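In the same list-of-arrays picture used for the other sequence operators, SequenceInsert corresponds to list.insert (or append when ‘position’ is omitted). This is only an analogy to illustrate the semantics, not the runtime implementation.

import numpy as np

seq = [np.array([1.0]), np.array([2.0])]
seq.append(np.array([3.0]))        # SequenceInsert without 'position': add at the back
seq.insert(1, np.array([1.5]))     # SequenceInsert with position=1
print([t.item() for t in seq])     # [1.0, 1.5, 2.0, 3.0]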

-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • input_sequence (heterogeneous)S: Input sequence.

  • -
  • tensor (heterogeneous)T: Input tensor to be inserted into the input sequence.

  • -
  • position (optional, heterogeneous)I: Position in the sequence where the new tensor is inserted. It is optional and default is to insert to the back of the sequence. Negative value means counting positions from the back. Accepted range in [-n, n], where n is the number of tensors in ‘input_sequence’. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).

  • -
-

Outputs

-
    -
  • output_sequence (heterogeneous)S: Output sequence that contains the inserted tensor at given position.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • -
  • I tensor(int32), tensor(int64): Constrain position to integral tensor. It must be a scalar(tensor of empty shape).

  • -
-
- -
-
-
-
-

OnnxSequenceInsert_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSequenceInsert_11(*args, **kwargs)#
-

Version

-

Onnx name: SequenceInsert

-

This version of the operator has been available since -version 11.

-

Summary

-

Outputs a tensor sequence that inserts ‘tensor’ into ‘input_sequence’ at ‘position’. ‘tensor’ must have the same data type as ‘input_sequence’. Accepted range for ‘position’ is in [-n, n], where n is the number of tensors in ‘input_sequence’. Negative value means counting positions from the back. ‘position’ is optional; by default it inserts ‘tensor’ at the back of ‘input_sequence’.

-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • input_sequence (heterogeneous)S: Input sequence.

  • -
  • tensor (heterogeneous)T: Input tensor to be inserted into the input sequence.

  • -
  • position (optional, heterogeneous)I: Position in the sequence where the new tensor is inserted. It is optional and default is to insert to the back of the sequence. Negative value means counting positions from the back. Accepted range in [-n, n], where n is the number of tensors in ‘input_sequence’. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).

  • -
-

Outputs

-
    -
  • output_sequence (heterogeneous)S: Output sequence that contains the inserted tensor at given position.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • -
  • I tensor(int32), tensor(int64): Constrain position to integral tensor. It must be a scalar(tensor of empty shape).

  • -
-
- -
-
-
-
-

OnnxSequenceLength#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSequenceLength(*args, **kwargs)#
-

Version

-

Onnx name: SequenceLength

-

This version of the operator has been available since -version 11.

-

Summary

-

Produces a scalar(tensor of empty shape) containing the number of tensors in ‘input_sequence’.

-

Inputs

-
    -
  • input_sequence (heterogeneous)S: Input sequence.

  • -
-

Outputs

-
    -
  • length (heterogeneous)I: Length of input sequence. It must be a scalar(tensor of empty shape).

  • -
-

Type Constraints

-
    -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • -
  • I tensor(int64): Constrain output to integral tensor. It must be a scalar(tensor of empty shape).

  • -
-
- -
-
-
-
-

OnnxSequenceLength_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSequenceLength_11(*args, **kwargs)#
-

Version

-

Onnx name: SequenceLength

-

This version of the operator has been available since -version 11.

-

Summary

-

Produces a scalar(tensor of empty shape) containing the number of tensors in ‘input_sequence’.

-

Inputs

-
    -
  • input_sequence (heterogeneous)S: Input sequence.

  • -
-

Outputs

-
    -
  • length (heterogeneous)I: Length of input sequence. It must be a scalar(tensor of empty shape).

  • -
-

Type Constraints

-
    -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • -
  • I tensor(int64): Constrain output to integral tensor. It must be a scalar(tensor of empty shape).

  • -
-
- -
-
-
-
-

OnnxShape#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxShape(*args, **kwargs)#
-

Version

-

Onnx name: Shape

-

This version of the operator has been available since -version 15.

-

Summary

-

Takes a tensor as input and outputs a 1-D int64 tensor containing the shape of the input tensor. Optional attributes start and end can be used to compute a slice of the input tensor’s shape. If the start axis is omitted, the slice starts from axis 0. The end axis, if specified, is exclusive (the returned value will not include the size of that axis). If the end axis is omitted, the axes up to the last one will be included. Negative axes indicate counting back from the last axis. Note that axes will be clipped to the range [0, r-1], where r is the rank of the input tensor, if they are out of range (after adding r in the case of negative axes). Thus, specifying any end value > r is equivalent to specifying an end value of r, and specifying any start value < -r is equivalent to specifying a start value of 0.

-

For example:

Input tensor with shape: [2, 3, 4]
No attributes specified.
Output: [2, 3, 4]

Input tensor with shape: [2, 3, 4]
start: -1
Output: [4]

Input tensor with shape: [2, 3, 4]
end: -1
Output: [2, 3]

Input tensor with shape: [2, 3, 4]
start: 1
end: 2
Output: [3]
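The examples above can be reproduced with plain NumPy; the helper below is only a sketch of the clipping rules described in the summary, not an onnxruntime API.

import numpy as np

def shape_op(data, start=0, end=None):
    r = data.ndim
    end = r if end is None else end
    start = start + r if start < 0 else start
    end = end + r if end < 0 else end
    start, end = min(max(start, 0), r), min(max(end, 0), r)
    return np.array(data.shape[start:end], dtype=np.int64)

x = np.zeros((2, 3, 4))
print(shape_op(x))                   # [2 3 4]
print(shape_op(x, start=-1))         # [4]
print(shape_op(x, end=-1))           # [2 3]
print(shape_op(x, start=1, end=2))   # [3]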

-

Attributes

-
    -
  • -
  • start: (Optional) Starting axis for slicing the shape. Default value is 0. Negative value means counting dimensions from the back. Default value is name: "start" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • shape (heterogeneous)T1: Shape of the input tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • -
  • T1 tensor(int64): Constrain output to int64 tensor.

  • -
-
- -
-
-
-
-

OnnxShape_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxShape_1(*args, **kwargs)#
-

Version

-

Onnx name: Shape

-

This version of the operator has been available since -version 1.

-

Summary

-

Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor.

-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • shape (heterogeneous)T1: Shape of the input tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • -
  • T1 tensor(int64): Constrain output to int64 tensor.

  • -
-
- -
-
-
-
-

OnnxShape_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxShape_13(*args, **kwargs)#
-

Version

-

Onnx name: Shape

-

This version of the operator has been available since -version 13.

-

Summary

-

Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor.

-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • shape (heterogeneous)T1: Shape of the input tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • -
  • T1 tensor(int64): Constrain output to int64 tensor.

  • -
-
- -
-
-
-
-

OnnxShape_15#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxShape_15(*args, **kwargs)#
-

Version

-

Onnx name: Shape

-

This version of the operator has been available since -version 15.

-

Summary

-

Takes a tensor as input and outputs a 1-D int64 tensor containing the shape of the input tensor. Optional attributes start and end can be used to compute a slice of the input tensor’s shape. If the start axis is omitted, the slice starts from axis 0. The end axis, if specified, is exclusive (the returned value will not include the size of that axis). If the end axis is omitted, the axes up to the last one will be included. Negative axes indicate counting back from the last axis. Note that axes will be clipped to the range [0, r-1], where r is the rank of the input tensor, if they are out of range (after adding r in the case of negative axes). Thus, specifying any end value > r is equivalent to specifying an end value of r, and specifying any start value < -r is equivalent to specifying a start value of 0.

-

For example:

Input tensor with shape: [2, 3, 4]
No attributes specified.
Output: [2, 3, 4]

Input tensor with shape: [2, 3, 4]
start: -1
Output: [4]

Input tensor with shape: [2, 3, 4]
end: -1
Output: [2, 3]

Input tensor with shape: [2, 3, 4]
start: 1
end: 2
Output: [3]

-

Attributes

-
    -
  • -
  • start: (Optional) Starting axis for slicing the shape. Default value is 0.Negative value means counting dimensions from the back. Default value is -name: "start" i: 0 type: INT

  • -
-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • shape (heterogeneous)T1: Shape of the input tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • -
  • T1 tensor(int64): Constrain output to int64 tensor.

  • -
-
- -
-
-
-
-

OnnxShrink#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxShrink(*args, **kwargs)#
-

Version

-

Onnx name: Shrink

-

This version of the operator has been available since -version 9.

-

Summary

-

Shrink takes one input data (Tensor<numeric>) and produces one Tensor output, having the same datatype and shape as the input. It has two attributes, lambd and bias. The formula of this operator is: If x < -lambd, y = x + bias; If x > lambd, y = x - bias; Otherwise, y = 0.
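A minimal NumPy sketch of this formula with the default lambd=0.5 and bias=0.0; the helper name shrink is illustrative only.

import numpy as np

def shrink(x, bias=0.0, lambd=0.5):
    return np.where(x < -lambd, x + bias, np.where(x > lambd, x - bias, 0.0))

print(shrink(np.array([-2.0, -0.25, 0.0, 0.25, 2.0])))   # [-2.  0.  0.  0.  2.]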

-

Attributes

-
    -
  • bias: The bias value added to output. Default is 0. Default value is -name: "bias" f: 0.0 type: FLOAT

  • -
  • lambd: The lambd value for the Shrink formulation. Default is 0.5. Default value is -name: "lambd" f: 0.5 type: FLOAT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: The input data as Tensor.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The output.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input to only numeric types.

  • -
-
- -
-
-
-
-

OnnxShrink_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxShrink_9(*args, **kwargs)#
-

Version

-

Onnx name: Shrink

-

This version of the operator has been available since -version 9.

-

Summary

-

Shrink takes one input data (Tensor<numeric>) and produces one Tensor output, having the same datatype and shape as the input. It has two attributes, lambd and bias. The formula of this operator is: If x < -lambd, y = x + bias; If x > lambd, y = x - bias; Otherwise, y = 0.

-

Attributes

-
    -
  • bias: The bias value added to output. Default is 0. Default value is -name: "bias" f: 0.0 type: FLOAT

  • -
  • lambd: The lambd value for the Shrink formulation. Default is 0.5. Default value is -name: "lambd" f: 0.5 type: FLOAT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: The input data as Tensor.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The output.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrains input to only numeric types.

  • -
-
- -
-
-
-
-

OnnxSigmoid#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSigmoid(*args, **kwargs)#
-

Version

-

Onnx name: Sigmoid

-

This version of the operator has been available since -version 13.

-

Summary

-

Sigmoid takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the tensor elementwise.
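A one-line NumPy sketch of the same elementwise function:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

print(sigmoid(np.array([-1.0, 0.0, 1.0])))   # approximately [0.2689 0.5 0.7311]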

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSigmoid_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSigmoid_1(*args, **kwargs)#
-

Version

-

Onnx name: Sigmoid

-

This version of the operator has been available since -version 1.

-

Summary

-

Sigmoid takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the tensor elementwise.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSigmoid_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSigmoid_13(*args, **kwargs)#
-

Version

-

Onnx name: Sigmoid

-

This version of the operator has been available since -version 13.

-

Summary

-

Sigmoid takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSigmoid_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSigmoid_6(*args, **kwargs)#
-

Version

-

Onnx name: Sigmoid

-

This version of the operator has been available since -version 6.

-

Summary

-

Sigmoid takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSign#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSign(*args, **kwargs)#
-

Version

-

Onnx name: Sign

-

This version of the operator has been available since -version 13.

-

Summary

-

Calculate the sign of the given input tensor element-wise. If input > 0, output 1. If input < 0, output -1. If input == 0, output 0.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The sign of the input tensor computed element-wise. It has the same shape and type of the input.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxSign_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSign_13(*args, **kwargs)#
-

Version

-

Onnx name: Sign

-

This version of the operator has been available since -version 13.

-

Summary

-

Calculate the sign of the given input tensor element-wise. If input > 0, output 1. If input < 0, output -1. If input == 0, output 0.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The sign of the input tensor computed element-wise. It has the same shape and type of the input.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxSign_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSign_9(*args, **kwargs)#
-

Version

-

Onnx name: Sign

-

This version of the operator has been available since -version 9.

-

Summary

-

Calculate the sign of the given input tensor element-wise. If input > 0, output 1. If input < 0, output -1. If input == 0, output 0.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The sign of the input tensor computed element-wise. It has the same shape and type of the input.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxSin#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSin(*args, **kwargs)#
-

Version

-

Onnx name: Sin

-

This version of the operator has been available since -version 7.

-

Summary

-

Calculates the sine of the given input tensor, element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The sine of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSin_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSin_7(*args, **kwargs)#
-

Version

-

Onnx name: Sin

-

This version of the operator has been available since -version 7.

-

Summary

-

Calculates the sine of the given input tensor, element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The sine of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSinh#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSinh(*args, **kwargs)#
-

Version

-

Onnx name: Sinh

-

This version of the operator has been available since -version 9.

-

Summary

-

Calculates the hyperbolic sine of the given input tensor element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The hyperbolic sine values of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSinh_9#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSinh_9(*args, **kwargs)#
-

Version

-

Onnx name: Sinh

-

This version of the operator has been available since -version 9.

-

Summary

-

Calculates the hyperbolic sine of the given input tensor element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The hyperbolic sine values of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSize#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSize(*args, **kwargs)#
-

Version

-

Onnx name: Size

-

This version of the operator has been available since -version 13.

-

Summary

-

Takes a tensor as input and outputs an int64 scalar equal to the total number of elements of the input tensor.
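In NumPy terms this is simply the total element count, i.e. the product of the dimensions (sketch only):

import numpy as np

x = np.zeros((2, 3, 4))
print(np.int64(x.size))   # 24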

-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • size (heterogeneous)T1: Total number of elements of the input tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • -
  • T1 tensor(int64): Constrain output to int64 tensor, which should be a scalar though.

  • -
-
- -
-
-
-
-

OnnxSize_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSize_1(*args, **kwargs)#
-

Version

-

Onnx name: Size

-

This version of the operator has been available since -version 1.

-

Summary

-

Takes a tensor as input and outputs an int64 scalar equal to the total number of elements of the input tensor.

-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • size (heterogeneous)T1: Total number of elements of the input tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • -
  • T1 tensor(int64): Constrain output to int64 tensor, which should be a scalar though.

  • -
-
- -
-
-
-
-

OnnxSize_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSize_13(*args, **kwargs)#
-

Version

-

Onnx name: Size

-

This version of the operator has been available since -version 13.

-

Summary

-

Takes a tensor as input and outputs an int64 scalar equal to the total number of elements of the input tensor.

-

Inputs

-
    -
  • data (heterogeneous)T: An input tensor.

  • -
-

Outputs

-
    -
  • size (heterogeneous)T1: Total number of elements of the input tensor

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • -
  • T1 tensor(int64): Constrain output to int64 tensor, which should be a scalar though.

  • -
-
- -
-
-
-
-

OnnxSlice#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSlice(*args, **kwargs)#
-

Version

-

Onnx name: Slice

-

This version of the operator has been available since -version 13.

-

Summary

-

Produces a slice of the input tensor along multiple axes. Similar to numpy: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses the starts, ends, axes and steps inputs to specify the start and end dimension and step for each axis in the list of axes; it uses this information to slice the input data tensor. If a negative value is passed for any of the start or end indices, it represents the number of elements before the end of that dimension. If the value passed to start or end is larger than n (the number of elements in this dimension), it represents n. For slicing to the end of a dimension with unknown size, it is recommended to pass in INT_MAX when slicing forward and INT_MIN when slicing backward. If a negative value is passed for step, it represents slicing backward. However, the step value cannot be 0. If axes are omitted, they are set to [0, …, ndim-1]. If steps are omitted, they are set to [1, …, 1] of length len(starts).

Example 1:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
steps = [1, 2]
result = [
    [5, 7],
]

Example 2:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
starts = [0, 1]
ends = [-1, 1000]
result = [
    [2, 3, 4],
]
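Both examples correspond to ordinary NumPy basic slicing, which can serve as a mental model (a sketch, not the runtime implementation):

import numpy as np

data = np.array([[1, 2, 3, 4],
                 [5, 6, 7, 8]])

# Example 1: axes=[0, 1], starts=[1, 0], ends=[2, 3], steps=[1, 2]
print(data[1:2, 0:3:2])      # [[5 7]]

# Example 2: starts=[0, 1], ends=[-1, 1000] (ends are clamped to the axis size)
print(data[0:-1, 1:1000])    # [[2 3 4]]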

Inputs

-

Between 3 and 5 inputs.

-
    -
  • data (heterogeneous)T: Tensor of data to extract slices from.

  • -
  • starts (heterogeneous)Tind: 1-D tensor of starting indices of corresponding axis in axes

  • -
  • ends (heterogeneous)Tind: 1-D tensor of ending indices (exclusive) of corresponding axis in axes

  • -
  • axes (optional, heterogeneous)Tind: 1-D tensor of axes that starts and ends apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).

  • -
  • steps (optional, heterogeneous)Tind: 1-D tensor of slice step of corresponding axis in axes. Negative value means slicing backward. ‘steps’ cannot be 0. Defaults to 1.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Sliced data tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • -
-
- -
-
-
-
-

OnnxSlice_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSlice_1(*args, **kwargs)#
-

Version

-

Onnx name: Slice

-

This version of the operator has been available since -version 1.

-

Summary

-

Produces a slice of the input tensor along multiple axes. Similar to numpy: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses the axes, starts and ends attributes to specify the start and end dimension for each axis in the list of axes; it uses this information to slice the input data tensor. If a negative value is passed for any of the start or end indices, it represents the number of elements before the end of that dimension. If the value passed to start or end is larger than n (the number of elements in this dimension), it represents n. For slicing to the end of a dimension with unknown size, it is recommended to pass in INT_MAX. If axes are omitted, they are set to [0, …, ndim-1].

Example 1:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
result = [
    [5, 6, 7],
]

Example 2:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
starts = [0, 1]
ends = [-1, 1000]
result = [
    [2, 3, 4],
]

Attributes

-
    -
  • -
  • -
  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensor of data to extract slices from.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Sliced data tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxSlice_10#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSlice_10(*args, **kwargs)#
-

Version

-

Onnx name: Slice

-

This version of the operator has been available since -version 10.

-

Summary

-

Produces a slice of the input tensor along multiple axes. Similar to numpy: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses the starts, ends, axes and steps inputs to specify the start and end dimension and step for each axis in the list of axes; it uses this information to slice the input data tensor. If a negative value is passed for any of the start or end indices, it represents the number of elements before the end of that dimension. If the value passed to start or end is larger than n (the number of elements in this dimension), it represents n. For slicing to the end of a dimension with unknown size, it is recommended to pass in INT_MAX. If a negative value is passed for step, it represents slicing backward. If axes are omitted, they are set to [0, …, ndim-1]. If steps are omitted, they are set to [1, …, 1] of length len(starts).

Example 1:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
steps = [1, 2]
result = [
    [5, 7],
]

Example 2:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
starts = [0, 1]
ends = [-1, 1000]
result = [
    [2, 3, 4],
]

Inputs

-

Between 3 and 5 inputs.

-
    -
  • data (heterogeneous)T: Tensor of data to extract slices from.

  • -
  • starts (heterogeneous)Tind: 1-D tensor of starting indices of corresponding axis in axes

  • -
  • ends (heterogeneous)Tind: 1-D tensor of ending indices (exclusive) of corresponding axis in axes

  • -
  • axes (optional, heterogeneous)Tind: 1-D tensor of axes that starts and ends apply to.

  • -
  • steps (optional, heterogeneous)Tind: 1-D tensor of slice step of corresponding axis in axes. Default to 1.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Sliced data tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • -
-
- -
-
-
-
-

OnnxSlice_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSlice_11(*args, **kwargs)#
-

Version

-

Onnx name: Slice

-

This version of the operator has been available since -version 11.

-

Summary

-

Produces a slice of the input tensor along multiple axes. Similar to numpy: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses the starts, ends, axes and steps inputs to specify the start and end dimension and step for each axis in the list of axes; it uses this information to slice the input data tensor. If a negative value is passed for any of the start or end indices, it represents the number of elements before the end of that dimension. If the value passed to start or end is larger than n (the number of elements in this dimension), it represents n. For slicing to the end of a dimension with unknown size, it is recommended to pass in INT_MAX when slicing forward and INT_MIN when slicing backward. If a negative value is passed for step, it represents slicing backward. However, the step value cannot be 0. If axes are omitted, they are set to [0, …, ndim-1]. If steps are omitted, they are set to [1, …, 1] of length len(starts).

Example 1:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
steps = [1, 2]
result = [
    [5, 7],
]

Example 2:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
starts = [0, 1]
ends = [-1, 1000]
result = [
    [2, 3, 4],
]

Inputs

-

Between 3 and 5 inputs.

-
    -
  • data (heterogeneous)T: Tensor of data to extract slices from.

  • -
  • starts (heterogeneous)Tind: 1-D tensor of starting indices of corresponding axis in axes

  • -
  • ends (heterogeneous)Tind: 1-D tensor of ending indices (exclusive) of corresponding axis in axes

  • -
  • axes (optional, heterogeneous)Tind: 1-D tensor of axes that starts and ends apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).

  • -
  • steps (optional, heterogeneous)Tind: 1-D tensor of slice step of corresponding axis in axes. Negative value means slicing backward. ‘steps’ cannot be 0. Defaults to 1.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Sliced data tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • -
-
- -
-
-
-
-

OnnxSlice_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSlice_13(*args, **kwargs)#
-

Version

-

Onnx name: Slice

-

This version of the operator has been available since -version 13.

-

Summary

-

Produces a slice of the input tensor along multiple axes. Similar to numpy: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses the starts, ends, axes and steps inputs to specify the start and end dimension and step for each axis in the list of axes; it uses this information to slice the input data tensor. If a negative value is passed for any of the start or end indices, it represents the number of elements before the end of that dimension. If the value passed to start or end is larger than n (the number of elements in this dimension), it represents n. For slicing to the end of a dimension with unknown size, it is recommended to pass in INT_MAX when slicing forward and INT_MIN when slicing backward. If a negative value is passed for step, it represents slicing backward. However, the step value cannot be 0. If axes are omitted, they are set to [0, …, ndim-1]. If steps are omitted, they are set to [1, …, 1] of length len(starts).

Example 1:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
steps = [1, 2]
result = [
    [5, 7],
]

Example 2:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
starts = [0, 1]
ends = [-1, 1000]
result = [
    [2, 3, 4],
]

Inputs

Between 3 and 5 inputs.

  • data (heterogeneous)T: Tensor of data to extract slices from.
  • starts (heterogeneous)Tind: 1-D tensor of starting indices of corresponding axis in axes.
  • ends (heterogeneous)Tind: 1-D tensor of ending indices (exclusive) of corresponding axis in axes.
  • axes (optional, heterogeneous)Tind: 1-D tensor of axes that starts and ends apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).
  • steps (optional, heterogeneous)Tind: 1-D tensor of slice step of corresponding axis in axes. Negative value means slicing backward. ‘steps’ cannot be 0. Defaults to 1.

Outputs

  • output (heterogeneous)T: Sliced data tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types.

OnnxSoftmax#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSoftmax(*args, **kwargs)#
-

Version

-

Onnx name: Softmax

-

This version of the operator has been available since version 13.

Summary

The operator computes the normalized exponential values for the given input:

    Softmax(input, axis) = Exp(input) / ReduceSum(Exp(input), axis=axis, keepdims=1)

The “axis” attribute indicates the dimension along which Softmax will be performed. The output tensor has the same shape and contains the Softmax values of the corresponding input.

Attributes

  • axis: Describes the dimension Softmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: -1 type: INT

Inputs

  • input (heterogeneous)T: The input tensor of rank >= axis.

Outputs

  • output (heterogeneous)T: The output values with the same shape as the input tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.
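As a sanity check on the formula above, the following numpy sketch reproduces Softmax-13 semantics along a single axis; subtracting the per-axis maximum is only a numerical-stability convenience and not part of the specification.

    import numpy as np

    def softmax(x, axis=-1):
        # Exp(input) / ReduceSum(Exp(input), axis=axis, keepdims=1)
        z = x - x.max(axis=axis, keepdims=True)
        e = np.exp(z)
        return e / e.sum(axis=axis, keepdims=True)

    x = np.array([[1.0, 2.0, 3.0],
                  [1.0, 1.0, 1.0]], dtype=np.float32)
    p = softmax(x, axis=-1)
    assert np.allclose(p.sum(axis=-1), 1.0)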

OnnxSoftmaxCrossEntropyLoss#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSoftmaxCrossEntropyLoss(*args, **kwargs)#
-

Version

-

Onnx name: SoftmaxCrossEntropyLoss

-

This version of the operator has been available since version 13.

Summary

Loss function that measures the softmax cross entropy between ‘scores’ and ‘labels’. This operator first computes a loss tensor whose shape is identical to the labels input. If the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, …, l_N). If the input is N-D tensor with shape (N, C, D1, D2, …, Dk), the loss tensor L may have (N, D1, D2, …, Dk) as its shape and L[i,][j_1][j_2]…[j_k] denotes a scalar element in L. After L is available, this operator can optionally do a reduction.

    shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2, …, Dk),
        with K >= 1 in case of K-dimensional loss.
    shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2, …, Dk),
        with K >= 1 in case of K-dimensional loss.

The loss for one sample, l_i, can be calculated as follows:

    l[i][d1][d2]…[dk] = -y[i][c][d1][d2]…[dk], where i is the index of classes.

or

    l[i][d1][d2]…[dk] = -y[i][c][d1][d2]…[dk] * weights[c], if ‘weights’ is provided.

The loss is zero when the label value equals ignore_index:

    l[i][d1][d2]…[dk] = 0, when labels[n][d1][d2]…[dk] = ignore_index

where:

    p = Softmax(scores)
    y = Log(p)
    c = labels[i][d1][d2]…[dk]

Finally, L is optionally reduced:
If reduction = ‘none’, the output is L with shape (N, D1, D2, …, Dk).
If reduction = ‘sum’, the output is scalar: Sum(L).
If reduction = ‘mean’, the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W), where tensor W is of shape (N, D1, D2, …, Dk) and W[n][d1][d2]…[dk] = weights[labels[i][d1][d2]…[dk]].

-

Attributes

  • reduction: Type of reduction to apply to loss: none, sum, mean (default). ‘none’: no reduction will be applied, ‘sum’: the output will be summed. ‘mean’: the sum of the output will be divided by the number of elements in the output. Default value is name: "reduction" s: "mean" type: STRING

Inputs

Between 2 and 3 inputs.

  • scores (heterogeneous)T: The predicted outputs with shape [batch_size, class_size], or [batch_size, class_size, D1, D2, …, Dk], where K is the number of dimensions.
  • labels (heterogeneous)Tind: The ground truth output tensor, with shape [batch_size], or [batch_size, D1, D2, …, Dk], where K is the number of dimensions. Labels element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the label values should either be in the range [0, C) or have the value ignore_index.
  • weights (optional, heterogeneous)T: A manual rescaling weight given to each class. If given, it has to be a 1D Tensor assigning weight to each of the classes. Otherwise, it is treated as if having all ones.

Outputs

Between 1 and 2 outputs.

  • output (heterogeneous)T: Weighted loss float Tensor. If reduction is ‘none’, this has the shape of [batch_size], or [batch_size, D1, D2, …, Dk] in case of K-dimensional loss. Otherwise, it is a scalar.
  • log_prob (optional, heterogeneous)T: Log probability tensor. If the output of softmax is prob, its value is log(prob).

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.
  • Tind tensor(int32), tensor(int64): Constrain target to integer types.
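A short numpy walk-through of the (N, C) case may help tie the p, y and c definitions together; it is a sketch of the default behaviour only (reduction=‘mean’, no weights, no ignore_index).

    import numpy as np

    def softmax_cross_entropy_loss(scores, labels):
        # p = Softmax(scores), y = Log(p), l_i = -y[i][labels[i]]
        z = scores - scores.max(axis=1, keepdims=True)
        log_p = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
        per_sample = -log_p[np.arange(scores.shape[0]), labels]
        return per_sample.mean(), log_p    # 'mean' reduction and the optional log_prob output

    scores = np.array([[2.0, 1.0, 0.1],
                       [0.5, 2.5, 0.3]], dtype=np.float32)
    labels = np.array([0, 1], dtype=np.int64)
    loss, log_prob = softmax_cross_entropy_loss(scores, labels)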

OnnxSoftmaxCrossEntropyLoss_12#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSoftmaxCrossEntropyLoss_12(*args, **kwargs)#
-

Version

-

Onnx name: SoftmaxCrossEntropyLoss

-

This version of the operator has been available since version 12.

-

Summary

-

Loss function that measures the softmax cross entropy -between ‘scores’ and ‘labels’. -This operator first computes a loss tensor whose shape is identical to the labels input. -If the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, …, l_N). -If the input is N-D tensor with shape (N, C, D1, D2, …, Dk), -the loss tensor L may have (N, D1, D2, …, Dk) as its shape and L[i,][j_1][j_2]…[j_k] denotes a scalar element in L. -After L is available, this operator can optionally do a reduction operator.

-
-
shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,…, Dk),

with K >= 1 in case of K-dimensional loss.

-
-
shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,…, Dk),

with K >= 1 in case of K-dimensional loss.

-
-
The loss for one sample, l_i, can be calculated as follows:

l[i][d1][d2]…[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes.

-
-
or

l[i][d1][d2]…[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if ‘weights’ is provided.

-
-
loss is zero for the case when label-value equals ignore_index.

l[i][d1][d2]…[dk] = 0, when labels[n][d1][d2]…[dk] = ignore_index

-
-
where:

p = Softmax(scores)
y = Log(p)
c = labels[i][d1][d2]…[dk]

-
-
-

Finally, L is optionally reduced: -If reduction = ‘none’, the output is L with shape (N, D1, D2, …, Dk). -If reduction = ‘sum’, the output is scalar: Sum(L). -If reduction = ‘mean’, the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W), -where tensor W is of shape (N, D1, D2, …, Dk) and W[n][d1][d2]…[dk] = weights[labels[i][d1][d2]…[dk]].

-

Attributes

-
    -
  • -
  • reduction: Type of reduction to apply to loss: none, sum, mean(default). ‘none’: no reduction will be applied, ‘sum’: the output will be summed. ‘mean’: the sum of the output will be divided by the number of elements in the output. Default value is -name: "reduction" s: "mean" type: STRING

  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • scores (heterogeneous)T: The predicted outputs with shape [batch_size, class_size], or [batch_size, class_size, D1, D2 , …, Dk], where K is the number of dimensions.

  • -
  • labels (heterogeneous)Tind: The ground truth output tensor, with shape [batch_size], or [batch_size, D1, D2, …, Dk], where K is the number of dimensions. Labels element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the label values should either be in the range [0, C) or have the value ignore_index.

  • -
  • weights (optional, heterogeneous)T: A manual rescaling weight given to each class. If given, it has to be a 1D Tensor assigning weight to each of the classes. Otherwise, it is treated as if having all ones.

  • -
-

Outputs

-

Between 1 and 2 outputs.

-
    -
  • output (heterogeneous)T: Weighted loss float Tensor. If reduction is ‘none’, this has the shape of [batch_size], or [batch_size, D1, D2, …, Dk] in case of K-dimensional loss. Otherwise, it is a scalar.

  • -
  • log_prob (optional, heterogeneous)T: Log probability tensor. If the output of softmax is prob, its value is log(prob).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
  • Tind tensor(int32), tensor(int64): Constrain target to integer types

  • -
-
- -
-
-
-
-

OnnxSoftmaxCrossEntropyLoss_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSoftmaxCrossEntropyLoss_13(*args, **kwargs)#
-

Version

-

Onnx name: SoftmaxCrossEntropyLoss

-

This version of the operator has been available since version 13.

-

Summary

-

Loss function that measures the softmax cross entropy -between ‘scores’ and ‘labels’. -This operator first computes a loss tensor whose shape is identical to the labels input. -If the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, …, l_N). -If the input is N-D tensor with shape (N, C, D1, D2, …, Dk), -the loss tensor L may have (N, D1, D2, …, Dk) as its shape and L[i,][j_1][j_2]…[j_k] denotes a scalar element in L. -After L is available, this operator can optionally do a reduction operator.

-
-
shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,…, Dk),

with K >= 1 in case of K-dimensional loss.

-
-
shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,…, Dk),

with K >= 1 in case of K-dimensional loss.

-
-
The loss for one sample, l_i, can be calculated as follows:

l[i][d1][d2]…[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes.

-
-
or

l[i][d1][d2]…[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if ‘weights’ is provided.

-
-
loss is zero for the case when label-value equals ignore_index.

l[i][d1][d2]…[dk] = 0, when labels[n][d1][d2]…[dk] = ignore_index

-
-
where:

p = Softmax(scores)
y = Log(p)
c = labels[i][d1][d2]…[dk]

-
-
-

Finally, L is optionally reduced: -If reduction = ‘none’, the output is L with shape (N, D1, D2, …, Dk). -If reduction = ‘sum’, the output is scalar: Sum(L). -If reduction = ‘mean’, the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W), -where tensor W is of shape (N, D1, D2, …, Dk) and W[n][d1][d2]…[dk] = weights[labels[i][d1][d2]…[dk]].

-

Attributes

-
    -
  • -
  • reduction: Type of reduction to apply to loss: none, sum, mean(default). ‘none’: no reduction will be applied, ‘sum’: the output will be summed. ‘mean’: the sum of the output will be divided by the number of elements in the output. Default value is -name: "reduction" s: "mean" type: STRING

  • -
-

Inputs

-

Between 2 and 3 inputs.

-
    -
  • scores (heterogeneous)T: The predicted outputs with shape [batch_size, class_size], or [batch_size, class_size, D1, D2 , …, Dk], where K is the number of dimensions.

  • -
  • labels (heterogeneous)Tind: The ground truth output tensor, with shape [batch_size], or [batch_size, D1, D2, …, Dk], where K is the number of dimensions. Labels element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the label values should either be in the range [0, C) or have the value ignore_index.

  • -
  • weights (optional, heterogeneous)T: A manual rescaling weight given to each class. If given, it has to be a 1D Tensor assigning weight to each of the classes. Otherwise, it is treated as if having all ones.

  • -
-

Outputs

-

Between 1 and 2 outputs.

-
    -
  • output (heterogeneous)T: Weighted loss float Tensor. If reduction is ‘none’, this has the shape of [batch_size], or [batch_size, D1, D2, …, Dk] in case of K-dimensional loss. Otherwise, it is a scalar.

  • -
  • log_prob (optional, heterogeneous)T: Log probability tensor. If the output of softmax is prob, its value is log(prob).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
  • Tind tensor(int32), tensor(int64): Constrain target to integer types

  • -
-
- -
-
-
-
-

OnnxSoftmax_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSoftmax_1(*args, **kwargs)#
-

Version

-

Onnx name: Softmax

-

This version of the operator has been available since version 1.

Summary

The operator computes the softmax (normalized exponential) values for each layer in the batch of the given input. The input is a 2-D tensor (Tensor<float>) of size (batch_size x input_feature_dimensions). The output tensor has the same shape and contains the softmax values of the corresponding input.

Input does not need to explicitly be a 2D vector; rather, it will be coerced into one. For an arbitrary n-dimensional tensor input in [a_0, a_1, …, a_{k-1}, a_k, …, a_{n-1}] and k is the axis provided, then input will be coerced into a 2-dimensional tensor with dimensions [a_0 * … * a_{k-1}, a_k * … * a_{n-1}]. For the default case where axis=1, this means the input tensor will be coerced into a 2D tensor of dimensions [a_0, a_1 * … * a_{n-1}], where a_0 is often the batch size. In this situation, we must have a_0 = N and a_1 * … * a_{n-1} = D. Each of these dimensions must be matched correctly, or else the operator will throw errors.

Attributes

  • axis: Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Default value is name: "axis" i: 1 type: INT

Inputs

-
    -
  • input (heterogeneous)T: The input tensor that’s coerced into a 2D matrix of size (NxD) as described above.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The output values with the same shape as input tensor (the original size without coercion).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
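The coercion to 2-D described above differs from the per-axis behaviour of Softmax-13 and is easy to reproduce in numpy; the sketch below flattens the input around the axis attribute before normalizing and then restores the original shape.

    import numpy as np

    def softmax_opset1(x, axis=1):
        # Coerce to 2-D: [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]
        rows = int(np.prod(x.shape[:axis]))
        flat = x.reshape(rows, -1)
        e = np.exp(flat - flat.max(axis=1, keepdims=True))
        out = e / e.sum(axis=1, keepdims=True)
        return out.reshape(x.shape)        # same shape as the input, without coercion

    x = np.random.rand(2, 3, 4).astype(np.float32)
    y = softmax_opset1(x, axis=1)          # each batch row is normalized over 3 * 4 values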

OnnxSoftmax_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSoftmax_11(*args, **kwargs)#
-

Version

-

Onnx name: Softmax

-

This version of the operator has been available since version 11.

Summary

The operator computes the softmax (normalized exponential) values for each layer in the batch of the given input.

The input does not need to explicitly be a 2D vector; rather, it will be coerced into one. For an arbitrary n-dimensional tensor input in [a_0, a_1, …, a_{k-1}, a_k, …, a_{n-1}] and k is the axis provided, then input will be coerced into a 2-dimensional tensor with dimensions [a_0 * … * a_{k-1}, a_k * … * a_{n-1}]. For the default case where axis=1, this means the input tensor will be coerced into a 2D tensor of dimensions [a_0, a_1 * … * a_{n-1}], where a_0 is often the batch size. In this situation, we must have a_0 = N and a_1 * … * a_{n-1} = D. Each of these dimensions must be matched correctly, or else the operator will throw errors. The output tensor has the same shape and contains the softmax values of the corresponding input.

Attributes

  • axis: Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: 1 type: INT

  • -
-

Inputs

-
    -
  • input (heterogeneous)T: The input tensor that’s coerced into a 2D matrix of size (NxD) as described above.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The output values with the same shape as input tensor (the original size without coercion).

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSoftmax_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSoftmax_13(*args, **kwargs)#
-

Version

-

Onnx name: Softmax

-

This version of the operator has been available since version 13.

Summary

The operator computes the normalized exponential values for the given input:

    Softmax(input, axis) = Exp(input) / ReduceSum(Exp(input), axis=axis, keepdims=1)

The “axis” attribute indicates the dimension along which Softmax will be performed. The output tensor has the same shape and contains the Softmax values of the corresponding input.

Attributes

  • axis: Describes the dimension Softmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: -1 type: INT

-
-
-
-

Inputs

-
    -
  • input (heterogeneous)T: The input tensor of rank >= axis.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The output values with the same shape as the input tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSoftplus#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSoftplus(*args, **kwargs)#
-

Version

-

Onnx name: Softplus

-

This version of the operator has been available since version 1.

Summary

Softplus takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the softplus function, y = ln(exp(x) + 1), is applied to the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: 1D input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: 1D input tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
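For reference, y = ln(exp(x) + 1) can be evaluated in numpy with logaddexp, which avoids overflow for large x; a minimal sketch:

    import numpy as np

    x = np.array([-10.0, 0.0, 10.0], dtype=np.float32)
    y = np.logaddexp(0.0, x)    # ln(exp(x) + 1), element-wise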

OnnxSoftplus_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSoftplus_1(*args, **kwargs)#
-

Version

-

Onnx name: Softplus

-

This version of the operator has been available since version 1.

-

Summary

-

Softplus takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the softplus function, y = ln(exp(x) + 1), is applied to -the tensor elementwise.

-

Inputs

-
    -
  • X (heterogeneous)T: 1D input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: 1D input tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSoftsign#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSoftsign(*args, **kwargs)#
-

Version

-

Onnx name: Softsign

-

This version of the operator has been available since version 1.

-

Summary

-

Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The softsign (x/(1+|x|)) values of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSoftsign_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSoftsign_1(*args, **kwargs)#
-

Version

-

Onnx name: Softsign

-

This version of the operator has been available since version 1.

-

Summary

-

Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise.

-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: The softsign (x/(1+|x|)) values of the input tensor computed element-wise

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSpaceToDepth#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSpaceToDepth(*args, **kwargs)#
-

Version

-

Onnx name: SpaceToDepth

-

This version of the operator has been available since version 13.

-

Summary

-

SpaceToDepth rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the depth dimension.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor of [N, C * blocksize * blocksize, H/blocksize, W/blocksize].

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
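The rearrangement can be written as a reshape/transpose pair in numpy. The sketch below assumes the usual blocksize attribute and follows the element ordering of the ONNX reference implementation as best recalled; the output shape matches the [N, C * blocksize * blocksize, H/blocksize, W/blocksize] form documented above.

    import numpy as np

    def space_to_depth(x, blocksize):
        # x: [N, C, H, W] -> [N, C * blocksize**2, H // blocksize, W // blocksize]
        n, c, h, w = x.shape
        tmp = x.reshape(n, c, h // blocksize, blocksize, w // blocksize, blocksize)
        tmp = tmp.transpose(0, 3, 5, 1, 2, 4)
        return tmp.reshape(n, c * blocksize * blocksize, h // blocksize, w // blocksize)

    x = np.arange(2 * 3 * 4 * 4, dtype=np.float32).reshape(2, 3, 4, 4)
    assert space_to_depth(x, blocksize=2).shape == (2, 12, 2, 2)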

OnnxSpaceToDepth_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSpaceToDepth_1(*args, **kwargs)#
-

Version

-

Onnx name: SpaceToDepth

-

This version of the operator has been available since version 1.

-

Summary

-

SpaceToDepth rearranges blocks of spatial data into depth. More specifically, -this op outputs a copy of the input tensor where values from the height and width dimensions -are moved to the depth dimension.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor of [N, C * blocksize * blocksize, H/blocksize, W/blocksize].

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxSpaceToDepth_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSpaceToDepth_13(*args, **kwargs)#
-

Version

-

Onnx name: SpaceToDepth

-

This version of the operator has been available since version 13.

-

Summary

-

SpaceToDepth rearranges blocks of spatial data into depth. More specifically, -this op outputs a copy of the input tensor where values from the height and width dimensions -are moved to the depth dimension.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

  • -
-

Outputs

-
    -
  • output (heterogeneous)T: Output tensor of [N, C * blocksize * blocksize, H/blocksize, W/blocksize].

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxSplit#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSplit(*args, **kwargs)#
-

Version

-

Onnx name: Split

-

This version of the operator has been available since version 13.

-

Summary

-

Split a tensor into a list of tensors, along the specified ‘axis’. Lengths of the parts can be specified using input ‘split’. Otherwise, the tensor is split to equal sized parts.

-

Attributes

-
    -
  • axis: Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1] where r = rank(input). Default value is name: "axis" i: 0 type: INT

  • -
-

Inputs

-

Between 1 and 2 inputs.

-
    -
  • input (heterogeneous)T: The tensor to split

  • -
  • split (optional, heterogeneous)tensor(int64): Optional length of each output. Values should be >= 0. Sum of the values must be equal to the dim value at ‘axis’ specified.

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic, heterogeneous)T: One or more outputs forming list of tensors after splitting

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
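The two modes described above (equal parts when ‘split’ is absent, explicit lengths otherwise) correspond to numpy.split along an axis; a minimal numpy check:

    import numpy as np

    x = np.arange(12, dtype=np.float32).reshape(2, 6)

    # No 'split' input: the tensor is divided into equal sized parts along 'axis'.
    equal_parts = np.split(x, 3, axis=1)                    # three tensors of shape (2, 2)

    # Explicit 'split' lengths: they must sum to the dimension size at 'axis'.
    lengths = [1, 2, 3]
    explicit = np.split(x, np.cumsum(lengths)[:-1], axis=1)
    assert [p.shape[1] for p in explicit] == lengths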

OnnxSplitToSequence#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSplitToSequence(*args, **kwargs)#
-

Version

-

Onnx name: SplitToSequence

-

This version of the operator has been available since version 11.

-

Summary

-

Split a tensor into a sequence of tensors, along the specified ‘axis’. Lengths of the parts can be specified using argument ‘split’. ‘split’ must contain only positive numbers. ‘split’ is either a scalar (tensor of empty shape), or a 1-D tensor. If ‘split’ is a scalar, then ‘input’ will be split into equally sized chunks (if possible). The last chunk will be smaller if the ‘input’ size along the given axis ‘axis’ is not divisible by ‘split’. Otherwise, the tensor is split into ‘size(split)’ chunks, with lengths of the parts on ‘axis’ specified in ‘split’. In this scenario, the sum of entries in ‘split’ must be equal to the dimension size of input tensor on ‘axis’.

-

Attributes

-
    -
  • axis: Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1]. Default value is name: "axis" i: 0 type: INT
  • keepdims: Keep the split dimension or not. Default 1, which means we keep split dimension. If input ‘split’ is specified, this attribute is ignored. Default value is name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-

Between 1 and 2 inputs.

-
    -
  • input (heterogeneous)T: The tensor to split

  • -
  • split (optional, heterogeneous)I: Length of each output. It can be either a scalar(tensor of empty shape), or a 1-D tensor. All values must be >= 0.

  • -
-

Outputs

-
    -
  • output_sequence (heterogeneous)S: One or more outputs forming a sequence of tensors after splitting

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input types to all tensor types.

  • -
  • I tensor(int32), tensor(int64): Constrain split size to integral tensor.

  • -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxSplitToSequence_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSplitToSequence_11(*args, **kwargs)#
-

Version

-

Onnx name: SplitToSequence

-

This version of the operator has been available since version 11.

-

Summary

-

Split a tensor into a sequence of tensors, along the specified -‘axis’. Lengths of the parts can be specified using argument ‘split’. -‘split’ must contain only positive numbers. -‘split’ is either a scalar (tensor of empty shape), or a 1-D tensor. -If ‘split’ is a scalar, then ‘input’ will be split into equally sized chunks(if possible). -Last chunk will be smaller if the ‘input’ size along the given axis ‘axis’ is not divisible -by ‘split’. -Otherwise, the tensor is split into ‘size(split)’ chunks, with lengths of the parts on ‘axis’ -specified in ‘split’. In this scenario, the sum of entries in ‘split’ must be equal to the -dimension size of input tensor on ‘axis’.

-

Attributes

-
    -
  • axis: Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1]. Default value is -name: "axis" i: 0 type: INT

  • -
  • keepdims: Keep the split dimension or not. Default 1, which means we keep split dimension. If input ‘split’ is specified, this attribute is ignored. Default value is -name: "keepdims" i: 1 type: INT

  • -
-

Inputs

-

Between 1 and 2 inputs.

-
    -
  • input (heterogeneous)T: The tensor to split

  • -
  • split (optional, heterogeneous)I: Length of each output. It can be either a scalar(tensor of empty shape), or a 1-D tensor. All values must be >= 0.

  • -
-

Outputs

-
    -
  • output_sequence (heterogeneous)S: One or more outputs forming a sequence of tensors after splitting

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input types to all tensor types.

  • -
  • I tensor(int32), tensor(int64): Constrain split size to integral tensor.

  • -
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxSplit_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSplit_1(*args, **kwargs)#
-

Version

-

Onnx name: Split

-

This version of the operator has been available since version 1.

-

Summary

-

Split a tensor into a list of tensors, along the specified ‘axis’. The lengths of the split can be specified using argument ‘axis’ or optional second input blob to the operator. Otherwise, the tensor is split to equal sized parts.

-

Attributes

-
    -
  • -
  • -
-

Inputs

-

Between 1 and 2 inputs.

-
    -
  • input (heterogeneous)T: The tensor to split

  • -
  • split (optional, heterogeneous)T: Optional list of output lengths (see also arg ‘split’)

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs… (variadic, heterogeneous)T: One or more outputs forming list of tensors after splitting

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSplit_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSplit_11(*args, **kwargs)#
-

Version

-

Onnx name: Split

-

This version of the operator has been available since version 11.

-

Summary

-

Split a tensor into a list of tensors, along the specified -‘axis’. Lengths of the parts can be specified using argument ‘split’. -Otherwise, the tensor is split to equal sized parts.

-

Attributes

-
    -
  • axis: Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1] where r = rank(input). Default value is -name: "axis" i: 0 type: INT

  • -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T: The tensor to split

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic, heterogeneous)T: One or more outputs forming list of tensors after splitting

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxSplit_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSplit_13(*args, **kwargs)#
-

Version

-

Onnx name: Split

-

This version of the operator has been available since version 13.

-

Summary

-

Split a tensor into a list of tensors, along the specified -‘axis’. Lengths of the parts can be specified using input ‘split’. -Otherwise, the tensor is split to equal sized parts.

-

Attributes

-
    -
  • axis: Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1] where r = rank(input). Default value is -name: "axis" i: 0 type: INT

  • -
-

Inputs

-

Between 1 and 2 inputs.

-
    -
  • input (heterogeneous)T: The tensor to split

  • -
  • split (optional, heterogeneous)tensor(int64): Optional length of each output. Values should be >= 0.Sum of the values must be equal to the dim value at ‘axis’ specified.

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic, heterogeneous)T: One or more outputs forming list of tensors after splitting

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxSplit_2#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSplit_2(*args, **kwargs)#
-

Version

-

Onnx name: Split

-

This version of the operator has been available since version 2.

-

Summary

-

Split a tensor into a list of tensors, along the specified -‘axis’. Lengths of the parts can be specified using argument ‘split’. -Otherwise, the tensor is split to equal sized parts.

-

Attributes

-
    -
  • axis: Which axis to split on. Default value is -name: "axis" i: 0 type: INT

  • -
  • -
-

Inputs

-
    -
  • input (heterogeneous)T: The tensor to split

  • -
-

Outputs

-

Between 1 and 2147483647 outputs.

-
    -
  • outputs (variadic, heterogeneous)T: One or more outputs forming list of tensors after splitting

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxSqrt#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSqrt(*args, **kwargs)#
-

Version

-

Onnx name: Sqrt

-

This version of the operator has been available since version 13.

-

Summary

-

Square root takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the square root, y = x^0.5, is applied to the tensor elementwise. If x is negative, then it will return NaN.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSqrt_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSqrt_1(*args, **kwargs)#
-

Version

-

Onnx name: Sqrt

-

This version of the operator has been available since version 1.

-

Summary

-

Square root takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the square root is, y = x^0.5, is applied to -the tensor elementwise. If x is negative, then it will return NaN.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSqrt_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSqrt_13(*args, **kwargs)#
-

Version

-

Onnx name: Sqrt

-

This version of the operator has been available since version 13.

-

Summary

-

Square root takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the square root is, y = x^0.5, is applied to -the tensor elementwise. If x is negative, then it will return NaN.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSqrt_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSqrt_6(*args, **kwargs)#
-

Version

-

Onnx name: Sqrt

-

This version of the operator has been available since version 6.

-

Summary

-

Square root takes one input data (Tensor<T>) and produces one output data -(Tensor<T>) where the square root is, y = x^0.5, is applied to -the tensor elementwise. If x is negative, then it will return NaN.

-

Inputs

-
    -
  • X (heterogeneous)T: Input tensor

  • -
-

Outputs

-
    -
  • Y (heterogeneous)T: Output tensor

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSqueeze#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSqueeze(*args, **kwargs)#
-

Version

-

Onnx name: Squeeze

-

This version of the operator has been available since version 13.

-

Summary

-

Remove single-dimensional entries from the shape of a tensor. Takes an input axes with a list of axes to squeeze. If axes is not provided, all the single dimensions will be removed from the shape. If an axis is selected with shape entry not equal to one, an error is raised.

-

Inputs

-

Between 1 and 2 inputs.

-
    -
  • data (heterogeneous)T: Tensors with at least max(dims) dimensions.

  • -
  • axes (optional, heterogeneous)tensor(int64): List of integers indicating the dimensions to squeeze. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).

  • -
-

Outputs

-
    -
  • squeezed (heterogeneous)T: Reshaped tensor with same data as input.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
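numpy.squeeze follows the same rules, including the error when a selected axis is not of size one, so it can be used to check expectations before exporting a graph:

    import numpy as np

    x = np.zeros((1, 3, 1, 4), dtype=np.float32)

    assert np.squeeze(x).shape == (3, 4)              # axes omitted: all unit dims removed
    assert np.squeeze(x, axis=0).shape == (3, 1, 4)   # only the selected axis is removed

    try:
        np.squeeze(x, axis=1)                         # shape entry at axis 1 is not 1
    except ValueError:
        pass                                          # matches the "error is raised" rule above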

OnnxSqueeze_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSqueeze_1(*args, **kwargs)#
-

Version

-

Onnx name: Squeeze

-

This version of the operator has been available since version 1.

-

Summary

-

Remove single-dimensional entries from the shape of a tensor. -Takes a parameter axes with a list of axes to squeeze. -If axes is not provided, all the single dimensions will be removed from -the shape. If an axis is selected with shape entry not equal to one, an error is raised.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensors with at least max(dims) dimensions.

  • -
-

Outputs

-
    -
  • squeezed (heterogeneous)T: Reshaped tensor with same data as input.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxSqueeze_11#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSqueeze_11(*args, **kwargs)#
-

Version

-

Onnx name: Squeeze

-

This version of the operator has been available since version 11.

-

Summary

-

Remove single-dimensional entries from the shape of a tensor. -Takes a parameter axes with a list of axes to squeeze. -If axes is not provided, all the single dimensions will be removed from -the shape. If an axis is selected with shape entry not equal to one, an error is raised.

-

Attributes

-
    -
  • -
-

Inputs

-
    -
  • data (heterogeneous)T: Tensors with at least max(dims) dimensions.

  • -
-

Outputs

-
    -
  • squeezed (heterogeneous)T: Reshaped tensor with same data as input.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxSqueeze_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSqueeze_13(*args, **kwargs)#
-

Version

-

Onnx name: Squeeze

-

This version of the operator has been available since version 13.

-

Summary

-

Remove single-dimensional entries from the shape of a tensor. -Takes an input axes with a list of axes to squeeze. -If axes is not provided, all the single dimensions will be removed from -the shape. If an axis is selected with shape entry not equal to one, an error is raised.

-

Inputs

-

Between 1 and 2 inputs.

-
    -
  • data (heterogeneous)T: Tensors with at least max(dims) dimensions.

  • -
  • axes (optional, heterogeneous)tensor(int64): List of integers indicating the dimensions to squeeze. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).

  • -
-

Outputs

-
    -
  • squeezed (heterogeneous)T: Reshaped tensor with same data as input.

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • -
-
- -
-
-
-
-

OnnxStringNormalizer#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxStringNormalizer(*args, **kwargs)#
-

Version

-

Onnx name: StringNormalizer

-

This version of the operator has been available since version 10.

-

Summary

-

StringNormalization performs string operations for basic cleaning. This operator has only one input (denoted by X) and only one output (denoted by Y). This operator first examines the elements in X, and removes elements specified in the “stopwords” attribute. After removing stop words, the intermediate result can be further lowercased, uppercased, or just returned depending on the “case_change_action” attribute. This operator only accepts [C]- and [1, C]-tensor. If all elements in X are dropped, the output will be the empty value of string tensor with shape [1] if input shape is [C] and shape [1, 1] if input shape is [1, C].

-

Attributes

-
    -
  • case_change_action: string enum that cases output to be lowercased/uppercased/unchanged. Valid values are “LOWER”, “UPPER”, “NONE”. Default is “NONE”. Default value is name: "case_change_action" s: "NONE" type: STRING
  • is_case_sensitive: Boolean. Whether the identification of stop words in X is case-sensitive. Default is false. Default value is name: "is_case_sensitive" i: 0 type: INT

  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)tensor(string): UTF-8 strings to normalize

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(string): UTF-8 Normalized strings

  • -
-
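The behaviour described above (stopword removal, then an optional case change, with a shape [1] empty result when everything is dropped) can be sketched in plain Python; the stopword list and input strings are hypothetical illustrations, not values from the specification.

    def string_normalizer(x, stopwords=(), case_change_action="NONE", is_case_sensitive=False):
        # Remove stop words first, honouring the is_case_sensitive flag.
        if is_case_sensitive:
            kept = [s for s in x if s not in set(stopwords)]
        else:
            lowered = {w.lower() for w in stopwords}
            kept = [s for s in x if s.lower() not in lowered]
        # Then optionally change the case of what remains.
        if case_change_action == "LOWER":
            kept = [s.lower() for s in kept]
        elif case_change_action == "UPPER":
            kept = [s.upper() for s in kept]
        return kept if kept else [""]      # empty string tensor of shape [1]

    print(string_normalizer(["Monday", "and", "Tuesday"], stopwords=["and"],
                            case_change_action="LOWER"))    # ['monday', 'tuesday']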

OnnxStringNormalizer_10#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxStringNormalizer_10(*args, **kwargs)#
-

Version

-

Onnx name: StringNormalizer

-

This version of the operator has been available since version 10.

-

Summary

-

StringNormalization performs string operations for basic cleaning. -This operator has only one input (denoted by X) and only one output -(denoted by Y). This operator first examines the elements in the X, -and removes elements specified in “stopwords” attribute. -After removing stop words, the intermediate result can be further lowercased, -uppercased, or just returned depending the “case_change_action” attribute. -This operator only accepts [C]- and [1, C]-tensor. -If all elements in X are dropped, the output will be the empty value of string tensor with shape [1] -if input shape is [C] and shape [1, 1] if input shape is [1, C].

-

Attributes

-
    -
  • case_change_action: string enum that cases output to be lowercased/uppercases/unchanged. Valid values are “LOWER”, “UPPER”, “NONE”. Default is “NONE” Default value is -name: "case_change_action" s: "NONE" type: STRING

  • -
  • is_case_sensitive: Boolean. Whether the identification of stop words in X is case-sensitive. Default is false Default value is -name: "is_case_sensitive" i: 0 type: INT

  • -
  • -
  • -
-

Inputs

-
    -
  • X (heterogeneous)tensor(string): UTF-8 strings to normalize

  • -
-

Outputs

-
    -
  • Y (heterogeneous)tensor(string): UTF-8 Normalized strings

  • -
-
- -
-
-
-
-

OnnxSub#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSub(*args, **kwargs)#
-

Version

-

Onnx name: Sub

-

This version of the operator has been available since version 14.

-

Summary

-

Performs element-wise binary subtraction (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

-

Inputs

-
    -
  • A (heterogeneous)T: First operand.

  • -
  • B (heterogeneous)T: Second operand.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same element type as two inputs

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
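Because Sub-14 uses multidirectional (numpy-style) broadcasting, plain numpy reproduces both the result shape and the extended integer support mentioned above:

    import numpy as np

    a = np.arange(6, dtype=np.int16).reshape(2, 3)   # int16 is among the types added in opset 14
    b = np.array([1, 2, 3], dtype=np.int16)          # shape (3,) broadcasts against (2, 3)
    c = a - b
    assert c.shape == (2, 3) and c.dtype == np.int16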

OnnxSub_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSub_1(*args, **kwargs)#
-

Version

-

Onnx name: Sub

-

This version of the operator has been available since version 1.

-

Summary

-

Performs element-wise binary subtraction (with limited broadcast support).

-

If necessary the right-hand-side argument will be broadcasted to match the shape of the left-hand-side argument. When broadcasting is specified, the second tensor can either be of element size 1 (including a scalar tensor and any tensor with rank equal to or smaller than the first tensor), or having its shape as a contiguous subset of the first tensor’s shape. The starting of the mutually equal shape is specified by the argument “axis”, and if it is not set, suffix matching is assumed. 1-dim expansion doesn’t work yet.

-

For example, the following tensor shapes are supported (with broadcast=1):

-
-

    shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
    shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor
    shape(A) = (2, 3, 4, 5), shape(B) = (5,)
    shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
    shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
    shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

-
-

Attribute broadcast=1 needs to be passed to enable broadcasting.

-

Attributes

-
    -
  • -
  • broadcast: Pass 1 to enable broadcasting. Default value is name: "broadcast" i: 0 type: INT

  • -
  • -
-

Inputs

-
    -
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • -
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSub_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSub_13(*args, **kwargs)#
-

Version

-

Onnx name: Sub

-

This version of the operator has been available since version 13.

-

Summary

-

Performs element-wise binary subtraction (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First operand.

  • -
  • B (heterogeneous)T: Second operand.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same element type as two inputs

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxSub_14#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSub_14(*args, **kwargs)#
-

Version

-

Onnx name: Sub

-

This version of the operator has been available since version 14.

-

Summary

-

Performs element-wise binary subtraction (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

-

Inputs

-
    -
  • A (heterogeneous)T: First operand.

  • -
  • B (heterogeneous)T: Second operand.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same element type as two inputs

  • -
-

Type Constraints

-
    -
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • -
-
- -
-
-
-
-

OnnxSub_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSub_6(*args, **kwargs)#
-

Version

-

Onnx name: Sub

-

This version of the operator has been available since version 6.

-

Summary

-

Performs element-wise binary subtraction (with limited broadcast support).

-

If necessary the right-hand-side argument will be broadcasted to match the -shape of left-hand-side argument. When broadcasting is specified, the second -tensor can either be of element size 1 (including a scalar tensor and any -tensor with rank equal to or smaller than the first tensor), or having its -shape as a contiguous subset of the first tensor’s shape. The starting of the -mutually equal shape is specified by the argument “axis”, and if it is not set, -suffix matching is assumed. 1-dim expansion doesn’t work yet.

-

For example, the following tensor shapes are supported (with broadcast=1):

-
-

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor -shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor -shape(A) = (2, 3, 4, 5), shape(B) = (5,) -shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) -shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 -shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

-
-

Attribute broadcast=1 needs to be passed to enable broadcasting.

-

Attributes

-
    -
  • -
  • broadcast: Pass 1 to enable broadcasting Default value is -name: "broadcast" i: 0 type: INT

  • -
-

Inputs

-
    -
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • -
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxSub_7#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSub_7(*args, **kwargs)#
-

Version

-

Onnx name: Sub

-

This version of the operator has been available since version 7.

-

Summary

-

Performs element-wise binary subtraction (with Numpy-style broadcasting support).

-

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-
    -
  • A (heterogeneous)T: First operand.

  • -
  • B (heterogeneous)T: Second operand.

  • -
-

Outputs

-
    -
  • C (heterogeneous)T: Result, has same element type as two inputs

  • -
-

Type Constraints

-
    -
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • -
-
- -
-
-
-
-

OnnxSum#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSum(*args, **kwargs)#
-

Version

-

Onnx name: Sum

-

This version of the operator has been available since version 13.

-

Summary

-

Element-wise sum of each of the input tensors (with Numpy-style broadcasting support). All inputs and outputs must have the same data type. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for sum.

  • -
-

Outputs

-
    -
  • sum (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
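Sum is the variadic, broadcasting counterpart of Add; in numpy terms it reduces a list of mutually broadcastable arrays with +, which the sketch below illustrates:

    import numpy as np
    from functools import reduce

    inputs = [np.ones((2, 3), dtype=np.float32),
              np.array([[1.0], [2.0]], dtype=np.float32),   # (2, 1) broadcasts to (2, 3)
              np.float32(0.5)]                              # scalar input
    total = reduce(np.add, inputs)
    assert total.shape == (2, 3)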

OnnxSum_1#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSum_1(*args, **kwargs)#
-

Version

-

Onnx name: Sum

-

This version of the operator has been available since version 1.

-

Summary

-

Element-wise sum of each of the input tensors. All inputs and outputs must -have the same shape and data type.

-

Attributes

-
    -
  • -
-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for Sum.

  • -
-

Outputs

-
    -
  • sum (heterogeneous)T: Output tensor. Same dimension as inputs.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSum_13#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSum_13(*args, **kwargs)#
-

Version

-

Onnx name: Sum

-

This version of the operator has been available since version 13.

-

Summary

-

Element-wise sum of each of the input tensors (with Numpy-style broadcasting support). -All inputs and outputs must have the same data type. -This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for sum.

  • -
-

Outputs

-
    -
  • sum (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSum_6#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSum_6(*args, **kwargs)#
-

Version

-

Onnx name: Sum

-

This version of the operator has been available since version 6.

-

Summary

-

Element-wise sum of each of the input tensors. All inputs and outputs must -have the same shape and data type.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for Sum.

  • -
-

Outputs

-
    -
  • sum (heterogeneous)T: Output tensor. Same dimension as inputs.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxSum_8#

-
-
-class skl2onnx.algebra.onnx_ops.OnnxSum_8(*args, **kwargs)#
-

Version

-

Onnx name: Sum

-

This version of the operator has been available since version 8.

-

Summary

-

Element-wise sum of each of the input tensors (with Numpy-style broadcasting support). -All inputs and outputs must have the same data type. -This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

-

Inputs

-

Between 1 and 2147483647 inputs.

-
    -
  • data_0 (variadic, heterogeneous)T: List of tensors for sum.

  • -
-

Outputs

-
    -
  • sum (heterogeneous)T: Output tensor.

  • -
-

Type Constraints

-
    -
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • -
-
- -
-
-
-
-

OnnxTan#

class skl2onnx.algebra.onnx_ops.OnnxTan(*args, **kwargs)#

Version

Onnx name: Tan

This version of the operator has been available since version 7.

Summary

Calculates the tangent of the given input tensor, element-wise.

Inputs

  • input (heterogeneous)T: Input tensor

Outputs

  • output (heterogeneous)T: The tangent of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxTan_7#

class skl2onnx.algebra.onnx_ops.OnnxTan_7(*args, **kwargs)#

Version

Onnx name: Tan

This version of the operator has been available since version 7.

Summary

Calculates the tangent of the given input tensor, element-wise.

Inputs

  • input (heterogeneous)T: Input tensor

Outputs

  • output (heterogeneous)T: The tangent of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxTanh#

class skl2onnx.algebra.onnx_ops.OnnxTanh(*args, **kwargs)#

Version

Onnx name: Tanh

This version of the operator has been available since version 13.

Summary

Calculates the hyperbolic tangent of the given input tensor element-wise.

Inputs

  • input (heterogeneous)T: Input tensor

Outputs

  • output (heterogeneous)T: The hyperbolic tangent values of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

OnnxTanh_1#

class skl2onnx.algebra.onnx_ops.OnnxTanh_1(*args, **kwargs)#

Version

Onnx name: Tanh

This version of the operator has been available since version 1.

Summary

Calculates the hyperbolic tangent of the given input tensor element-wise.

Inputs

  • input (heterogeneous)T: 1-D input tensor

Outputs

  • output (heterogeneous)T: The hyperbolic tangent values of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxTanh_13#

class skl2onnx.algebra.onnx_ops.OnnxTanh_13(*args, **kwargs)#

Version

Onnx name: Tanh

This version of the operator has been available since version 13.

Summary

Calculates the hyperbolic tangent of the given input tensor element-wise.

Inputs

  • input (heterogeneous)T: Input tensor

Outputs

  • output (heterogeneous)T: The hyperbolic tangent values of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

OnnxTanh_6#

class skl2onnx.algebra.onnx_ops.OnnxTanh_6(*args, **kwargs)#

Version

Onnx name: Tanh

This version of the operator has been available since version 6.

Summary

Calculates the hyperbolic tangent of the given input tensor element-wise.

Inputs

  • input (heterogeneous)T: Input tensor

Outputs

  • output (heterogeneous)T: The hyperbolic tangent values of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxTfIdfVectorizer#

class skl2onnx.algebra.onnx_ops.OnnxTfIdfVectorizer(*args, **kwargs)#

Version

Onnx name: TfIdfVectorizer

This version of the operator has been available since version 9.

Summary

This transform extracts n-grams from the input sequence and saves them as a vector. Input can be either a 1-D or 2-D tensor. For 1-D input, output is the n-gram representation of that input. For 2-D input, the output is also a 2-D tensor whose i-th row is the n-gram representation of the i-th input row. More specifically, if input shape is [C], the corresponding output shape would be [max(ngram_indexes) + 1]. If input shape is [N, C], this operator produces a [N, max(ngram_indexes) + 1]-tensor.

In contrast to standard n-gram extraction, here the indexes used to extract an n-gram from the original sequence are not necessarily consecutive numbers. The discontinuity between indexes is controlled by the number of skips. If the number of skips is 2, we should skip two tokens when scanning through the original sequence. Let’s consider an example. Assume that the input sequence is [94, 17, 36, 12, 28] and the number of skips is 2. The associated 2-grams are [94, 12] and [17, 28], respectively indexed by [0, 3] and [1, 4]. If the number of skips becomes 0, the 2-grams generated are [94, 17], [17, 36], [36, 12], [12, 28] indexed by [0, 1], [1, 2], [2, 3], [3, 4], respectively.

The output vector (denoted by Y) stores the count of each n-gram; Y[ngram_indexes[i]] indicates the number of times the i-th n-gram is found. The attribute ngram_indexes is used to determine the mapping between index i and the corresponding n-gram’s output coordinate. If pool_int64s is [94, 17, 17, 36], ngram_indexes is [1, 0], ngram_counts=[0, 0], then Y[0] (first element in Y) and Y[1] (second element in Y) are the counts of [17, 36] and [94, 17], respectively. An n-gram which cannot be found in pool_strings/pool_int64s should be ignored and has no effect on the output. Note that we may consider all skips up to S when generating the n-grams.

The examples used above are true if mode is “TF”. If mode is “IDF”, all the counts larger than 1 would be truncated to 1 and the i-th element in weights would be used to scale (by multiplication) the count of the i-th n-gram in pool. If mode is “TFIDF”, this operator first computes the counts of all n-grams and then scales them by the associated values in the weights attribute.

Only one of pool_strings and pool_int64s can be set. If pool_int64s is set, the input should be an integer tensor. If pool_strings is set, the input must be a string tensor.

Inputs

  • X (heterogeneous)T: Input for n-gram extraction

Outputs

  • Y (heterogeneous)T1: Ngram results

Type Constraints

  • T tensor(string), tensor(int32), tensor(int64): Input is either string UTF-8 or int32/int64
  • T1 tensor(float): 1-D tensor of floats
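The skip-gram behaviour described in the summary can be checked with a small plain-Python sketch; it reproduces the documented example and is not the operator itself.

    def two_grams_with_skips(seq, skips):
        # A 2-gram with `skips` skipped tokens pairs elements whose
        # indices differ by skips + 1, as described in the summary.
        step = skips + 1
        return [(seq[i], seq[i + step]) for i in range(len(seq) - step)]

    seq = [94, 17, 36, 12, 28]
    print(two_grams_with_skips(seq, 2))  # [(94, 12), (17, 28)]
    print(two_grams_with_skips(seq, 0))  # [(94, 17), (17, 36), (36, 12), (12, 28)]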

OnnxTfIdfVectorizer_9#

class skl2onnx.algebra.onnx_ops.OnnxTfIdfVectorizer_9(*args, **kwargs)#

Version

Onnx name: TfIdfVectorizer

This version of the operator has been available since version 9.

Summary

This transform extracts n-grams from the input sequence and saves them as a vector. Input can be either a 1-D or 2-D tensor. For 1-D input, output is the n-gram representation of that input. For 2-D input, the output is also a 2-D tensor whose i-th row is the n-gram representation of the i-th input row. More specifically, if input shape is [C], the corresponding output shape would be [max(ngram_indexes) + 1]. If input shape is [N, C], this operator produces a [N, max(ngram_indexes) + 1]-tensor.

In contrast to standard n-gram extraction, here the indexes used to extract an n-gram from the original sequence are not necessarily consecutive numbers. The discontinuity between indexes is controlled by the number of skips. If the number of skips is 2, we should skip two tokens when scanning through the original sequence. Let’s consider an example. Assume that the input sequence is [94, 17, 36, 12, 28] and the number of skips is 2. The associated 2-grams are [94, 12] and [17, 28], respectively indexed by [0, 3] and [1, 4]. If the number of skips becomes 0, the 2-grams generated are [94, 17], [17, 36], [36, 12], [12, 28] indexed by [0, 1], [1, 2], [2, 3], [3, 4], respectively.

The output vector (denoted by Y) stores the count of each n-gram; Y[ngram_indexes[i]] indicates the number of times the i-th n-gram is found. The attribute ngram_indexes is used to determine the mapping between index i and the corresponding n-gram’s output coordinate. If pool_int64s is [94, 17, 17, 36], ngram_indexes is [1, 0], ngram_counts=[0, 0], then Y[0] (first element in Y) and Y[1] (second element in Y) are the counts of [17, 36] and [94, 17], respectively. An n-gram which cannot be found in pool_strings/pool_int64s should be ignored and has no effect on the output. Note that we may consider all skips up to S when generating the n-grams.

The examples used above are true if mode is “TF”. If mode is “IDF”, all the counts larger than 1 would be truncated to 1 and the i-th element in weights would be used to scale (by multiplication) the count of the i-th n-gram in pool. If mode is “TFIDF”, this operator first computes the counts of all n-grams and then scales them by the associated values in the weights attribute.

Only one of pool_strings and pool_int64s can be set. If pool_int64s is set, the input should be an integer tensor. If pool_strings is set, the input must be a string tensor.

Inputs

  • X (heterogeneous)T: Input for n-gram extraction

Outputs

  • Y (heterogeneous)T1: Ngram results

Type Constraints

  • T tensor(string), tensor(int32), tensor(int64): Input is either string UTF-8 or int32/int64
  • T1 tensor(float): 1-D tensor of floats

OnnxThresholdedRelu#

class skl2onnx.algebra.onnx_ops.OnnxThresholdedRelu(*args, **kwargs)#

Version

Onnx name: ThresholdedRelu

This version of the operator has been available since version 10.

Summary

ThresholdedRelu takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the rectified linear function, y = x for x > alpha, y = 0 otherwise, is applied to the tensor elementwise.

Attributes

  • alpha: Threshold value. Default value is name: "alpha" f: 1.0 type: FLOAT

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
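The element-wise rule above (y = x for x > alpha, y = 0 otherwise) is easy to mirror in numpy; a small sketch for reference:

    import numpy as np

    def thresholded_relu(x, alpha=1.0):
        # Keep values strictly greater than alpha, zero out the rest.
        return np.where(x > alpha, x, 0).astype(x.dtype)

    x = np.array([-1.0, 0.5, 1.0, 2.5], dtype=np.float32)
    print(thresholded_relu(x))  # [0.  0.  0.  2.5]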

OnnxThresholdedRelu_10#

class skl2onnx.algebra.onnx_ops.OnnxThresholdedRelu_10(*args, **kwargs)#

Version

Onnx name: ThresholdedRelu

This version of the operator has been available since version 10.

Summary

ThresholdedRelu takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the rectified linear function, y = x for x > alpha, y = 0 otherwise, is applied to the tensor elementwise.

Attributes

  • alpha: Threshold value. Default value is name: "alpha" f: 1.0 type: FLOAT

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxTile#

class skl2onnx.algebra.onnx_ops.OnnxTile(*args, **kwargs)#

Version

Onnx name: Tile

This version of the operator has been available since version 13.

Summary

Constructs a tensor by tiling a given tensor. This is the same as function tile in Numpy, but no broadcast. For example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]]

Inputs

  • input (heterogeneous)T: Input tensor of any shape.
  • repeats (heterogeneous)T1: 1D int64 tensor of the same length as input’s dimension number, includes numbers of repeated copies along input’s dimensions.

Outputs

  • output (heterogeneous)T: Output tensor of the same dimension and type as tensor input. output_dim[i] = input_dim[i] * repeats[i]

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.
  • T1 tensor(int64): Constrain repeat’s type to int64 tensors.
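Since the summary states the behaviour matches numpy's tile (without broadcasting), the documented example can be reproduced directly:

    import numpy as np

    A = np.array([[1, 2], [3, 4]])
    repeats = [1, 2]              # one repeat count per dimension of A
    print(np.tile(A, repeats))
    # [[1 2 1 2]
    #  [3 4 3 4]]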

OnnxTile_1#

class skl2onnx.algebra.onnx_ops.OnnxTile_1(*args, **kwargs)#

Version

Onnx name: Tile

This version of the operator has been available since version 1.

Summary

Repeat the elements of a tensor along an axis.

Inputs

  • input (heterogeneous)T: Input tensor of any shape.
  • tiles (heterogeneous)T: Number of repeated copies to make of the input tensor.
  • axis (heterogeneous)T: Axis along which to repeat.

Outputs

  • output (heterogeneous)T: Output tensor of same shape and type as input.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.
  • T1 tensor(int64): Constrain tiles and axis’s type to int64 tensors.

OnnxTile_13#

class skl2onnx.algebra.onnx_ops.OnnxTile_13(*args, **kwargs)#

Version

Onnx name: Tile

This version of the operator has been available since version 13.

Summary

Constructs a tensor by tiling a given tensor. This is the same as function tile in Numpy, but no broadcast. For example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]]

Inputs

  • input (heterogeneous)T: Input tensor of any shape.
  • repeats (heterogeneous)T1: 1D int64 tensor of the same length as input’s dimension number, includes numbers of repeated copies along input’s dimensions.

Outputs

  • output (heterogeneous)T: Output tensor of the same dimension and type as tensor input. output_dim[i] = input_dim[i] * repeats[i]

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.
  • T1 tensor(int64): Constrain repeat’s type to int64 tensors.

OnnxTile_6#

class skl2onnx.algebra.onnx_ops.OnnxTile_6(*args, **kwargs)#

Version

Onnx name: Tile

This version of the operator has been available since version 6.

Summary

Constructs a tensor by tiling a given tensor. This is the same as function tile in Numpy, but no broadcast. For example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]]

Inputs

  • input (heterogeneous)T: Input tensor of any shape.
  • repeats (heterogeneous)T1: 1D int64 tensor of the same length as input’s dimension number, includes numbers of repeated copies along input’s dimensions.

Outputs

  • output (heterogeneous)T: Output tensor of the same dimension and type as tensor input. output_dim[i] = input_dim[i] * repeats[i]

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.
  • T1 tensor(int64): Constrain repeat’s type to int64 tensors.

OnnxTopK#

class skl2onnx.algebra.onnx_ops.OnnxTopK(*args, **kwargs)#

Version

Onnx name: TopK

This version of the operator has been available since version 11.

Summary

Retrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of shape [a_1, a_2, …, a_n, r] and integer argument k, return two outputs:

  • Value tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which contains the values of the top k elements along the specified axis
  • Index tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which contains the indices of the top k elements (original indices from the input tensor).

If “largest” is 1 (the default value) then the k largest elements are returned. If “sorted” is 1 (the default value) then the resulting k elements will be sorted. If “sorted” is 0, order of returned ‘Values’ and ‘Indices’ are undefined.

Given two equivalent values, this operator uses the indices along the axis as a tiebreaker. That is, the element with the lower index will appear first.

Attributes

  • axis: Dimension on which to do the sort. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: -1 type: INT
  • largest: Whether to return the top-K largest or smallest elements. Default value is name: "largest" i: 1 type: INT
  • sorted: Whether to return the elements in sorted order. Default value is name: "sorted" i: 1 type: INT

Inputs

  • X (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_n, r]
  • K (heterogeneous)tensor(int64): A 1-D tensor containing a single positive value corresponding to the number of top elements to retrieve

Outputs

  • Values (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing top K values from the input tensor
  • Indices (heterogeneous)I: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing the corresponding input tensor indices for the top K values.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to numeric tensors.
  • I tensor(int64): Constrain index tensor to int64
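A numpy sketch of the behaviour described above with largest and sorted both set to 1; the helper name is illustrative, and the stable argsort reproduces the documented tie-break (lower index appears first):

    import numpy as np

    def top_k(x, k, axis=-1, largest=True):
        # Stable sort keeps the lower original index first on ties.
        order = np.argsort(-x if largest else x, axis=axis, kind='stable')
        idx = np.take(order, np.arange(k), axis=axis)
        values = np.take_along_axis(x, idx, axis=axis)
        return values, idx

    X = np.array([[3., 1., 2., 4.],
                  [0., 5., 5., 1.]], dtype=np.float32)
    values, indices = top_k(X, k=2)
    print(values)   # [[4. 3.] [5. 5.]]
    print(indices)  # [[3 0] [1 2]]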

OnnxTopK_1#

class skl2onnx.algebra.onnx_ops.OnnxTopK_1(*args, **kwargs)#

Version

Onnx name: TopK

This version of the operator has been available since version 1.

Summary

Retrieve the top-K elements along a specified axis. Given an input tensor of shape [a_1, a_2, …, a_n, r] and integer argument k, return two outputs:

  • Value tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which contains the values of the top k elements along the specified axis
  • Index tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which contains the indices of the top k elements (original indices from the input tensor).

Given two equivalent values, this operator uses the indices along the axis as a tiebreaker. That is, the element with the lower index will appear first.

Attributes

  • axis: Dimension on which to do the sort. Default value is name: "axis" i: -1 type: INT

Inputs

  • X (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_n, r]

Outputs

  • Values (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing top K values from the input tensor
  • Indices (heterogeneous)I: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing the corresponding input tensor indices for the top K values.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
  • I tensor(int64): Constrain index tensor to int64

OnnxTopK_10#

class skl2onnx.algebra.onnx_ops.OnnxTopK_10(*args, **kwargs)#

Version

Onnx name: TopK

This version of the operator has been available since version 10.

Summary

Retrieve the top-K elements along a specified axis. Given an input tensor of shape [a_1, a_2, …, a_n, r] and integer argument k, return two outputs:

  • Value tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which contains the values of the top k elements along the specified axis
  • Index tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which contains the indices of the top k elements (original indices from the input tensor).

Given two equivalent values, this operator uses the indices along the axis as a tiebreaker. That is, the element with the lower index will appear first.

Attributes

  • axis: Dimension on which to do the sort. Default value is name: "axis" i: -1 type: INT

Inputs

  • X (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_n, r]
  • K (heterogeneous)tensor(int64): A 1-D tensor containing a single positive value corresponding to the number of top elements to retrieve

Outputs

  • Values (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing top K values from the input tensor
  • Indices (heterogeneous)I: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing the corresponding input tensor indices for the top K values.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
  • I tensor(int64): Constrain index tensor to int64

OnnxTopK_11#

class skl2onnx.algebra.onnx_ops.OnnxTopK_11(*args, **kwargs)#

Version

Onnx name: TopK

This version of the operator has been available since version 11.

Summary

Retrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of shape [a_1, a_2, …, a_n, r] and integer argument k, return two outputs:

  • Value tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which contains the values of the top k elements along the specified axis
  • Index tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which contains the indices of the top k elements (original indices from the input tensor).

If “largest” is 1 (the default value) then the k largest elements are returned. If “sorted” is 1 (the default value) then the resulting k elements will be sorted. If “sorted” is 0, order of returned ‘Values’ and ‘Indices’ are undefined.

Given two equivalent values, this operator uses the indices along the axis as a tiebreaker. That is, the element with the lower index will appear first.

Attributes

  • axis: Dimension on which to do the sort. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: -1 type: INT
  • largest: Whether to return the top-K largest or smallest elements. Default value is name: "largest" i: 1 type: INT
  • sorted: Whether to return the elements in sorted order. Default value is name: "sorted" i: 1 type: INT

Inputs

  • X (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_n, r]
  • K (heterogeneous)tensor(int64): A 1-D tensor containing a single positive value corresponding to the number of top elements to retrieve

Outputs

  • Values (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing top K values from the input tensor
  • Indices (heterogeneous)I: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing the corresponding input tensor indices for the top K values.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to numeric tensors.
  • I tensor(int64): Constrain index tensor to int64

OnnxTranspose#

class skl2onnx.algebra.onnx_ops.OnnxTranspose(*args, **kwargs)#

Version

Onnx name: Transpose

This version of the operator has been available since version 13.

Summary

Transpose the input tensor similar to numpy.transpose. For example, when perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape will be (2, 1, 3).

Inputs

  • data (heterogeneous)T: An input tensor.

Outputs

  • transposed (heterogeneous)T: Transposed output.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.
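The perm example from the summary, mirrored with numpy.transpose:

    import numpy as np

    data = np.zeros((1, 2, 3), dtype=np.float32)
    transposed = np.transpose(data, axes=(1, 0, 2))
    print(transposed.shape)  # (2, 1, 3)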

OnnxTranspose_1#

class skl2onnx.algebra.onnx_ops.OnnxTranspose_1(*args, **kwargs)#

Version

Onnx name: Transpose

This version of the operator has been available since version 1.

Summary

Transpose the input tensor similar to numpy.transpose. For example, when perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape will be (2, 1, 3).

Inputs

  • data (heterogeneous)T: An input tensor.

Outputs

  • transposed (heterogeneous)T: Transposed output.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxTranspose_13#

class skl2onnx.algebra.onnx_ops.OnnxTranspose_13(*args, **kwargs)#

Version

Onnx name: Transpose

This version of the operator has been available since version 13.

Summary

Transpose the input tensor similar to numpy.transpose. For example, when perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape will be (2, 1, 3).

Inputs

  • data (heterogeneous)T: An input tensor.

Outputs

  • transposed (heterogeneous)T: Transposed output.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxTreeEnsembleClassifier#

class skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleClassifier(*args, **kwargs)#

Version

Onnx name: TreeEnsembleClassifier

This version of the operator has been available since version 3 of domain ai.onnx.ml.

Summary

Tree Ensemble classifier. Returns the top class for each of N inputs.

The attributes named ‘nodes_X’ form a sequence of tuples, associated by index into the sequences, which must all be of equal length. These tuples define the nodes.

Similarly, all fields prefixed with ‘class_’ are tuples of votes at the leaves. A leaf may have multiple votes, where each vote is weighted by the associated class_weights index.

One and only one of classlabels_strings or classlabels_int64s will be defined. The class_ids are indices into this list. All fields ending with _as_tensor can be used instead of the same parameter without the suffix if the element type is double and not float.

Attributes

  • post_transform: Indicates the transform to apply to the score. One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT.’ Default value is name: "post_transform" s: "NONE" type: STRING

Inputs

  • X (heterogeneous)T1: Input of shape [N,F]

Outputs

  • Y (heterogeneous)T2: N, Top class for each point
  • Z (heterogeneous)tensor(float): The class score for each class, for each point, a tensor of shape [N,E].

Type Constraints

  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type.
  • T2 tensor(string), tensor(int64): The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used.
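This node is usually not built by hand: converting a scikit-learn tree ensemble with skl2onnx emits it. A hedged sketch of that typical path, assuming skl2onnx's to_onnx helper (the exact node list also depends on converter options such as zipmap):

    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier
    from skl2onnx import to_onnx

    X, y = load_iris(return_X_y=True)
    clf = RandomForestClassifier(n_estimators=3, max_depth=3, random_state=0)
    clf.fit(X, y)

    # The converter is expected to emit a TreeEnsembleClassifier node
    # (ai.onnx.ml domain) plus post-processing nodes.
    onx = to_onnx(clf, X[:1].astype(np.float32))
    print([n.op_type for n in onx.graph.node])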

OnnxTreeEnsembleClassifier_1#

class skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleClassifier_1(*args, **kwargs)#

Version

Onnx name: TreeEnsembleClassifier

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Tree Ensemble classifier. Returns the top class for each of N inputs.

The attributes named ‘nodes_X’ form a sequence of tuples, associated by index into the sequences, which must all be of equal length. These tuples define the nodes.

Similarly, all fields prefixed with ‘class_’ are tuples of votes at the leaves. A leaf may have multiple votes, where each vote is weighted by the associated class_weights index.

One and only one of classlabels_strings or classlabels_int64s will be defined. The class_ids are indices into this list.

Attributes

  • post_transform: Indicates the transform to apply to the score. One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT.’ Default value is name: "post_transform" s: "NONE" type: STRING

Inputs

  • X (heterogeneous)T1: Input of shape [N,F]

Outputs

  • Y (heterogeneous)T2: N, Top class for each point
  • Z (heterogeneous)tensor(float): The class score for each class, for each point, a tensor of shape [N,E].

Type Constraints

  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type.
  • T2 tensor(string), tensor(int64): The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used.

OnnxTreeEnsembleClassifier_3#

class skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleClassifier_3(*args, **kwargs)#

Version

Onnx name: TreeEnsembleClassifier

This version of the operator has been available since version 3 of domain ai.onnx.ml.

Summary

Tree Ensemble classifier. Returns the top class for each of N inputs.

The attributes named ‘nodes_X’ form a sequence of tuples, associated by index into the sequences, which must all be of equal length. These tuples define the nodes.

Similarly, all fields prefixed with ‘class_’ are tuples of votes at the leaves. A leaf may have multiple votes, where each vote is weighted by the associated class_weights index.

One and only one of classlabels_strings or classlabels_int64s will be defined. The class_ids are indices into this list. All fields ending with _as_tensor can be used instead of the same parameter without the suffix if the element type is double and not float.

Attributes

  • post_transform: Indicates the transform to apply to the score. One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT.’ Default value is name: "post_transform" s: "NONE" type: STRING

Inputs

  • X (heterogeneous)T1: Input of shape [N,F]

Outputs

  • Y (heterogeneous)T2: N, Top class for each point
  • Z (heterogeneous)tensor(float): The class score for each class, for each point, a tensor of shape [N,E].

Type Constraints

  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type.
  • T2 tensor(string), tensor(int64): The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used.

OnnxTreeEnsembleRegressor#

class skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleRegressor(*args, **kwargs)#

Version

Onnx name: TreeEnsembleRegressor

This version of the operator has been available since version 3 of domain ai.onnx.ml.

Summary

Tree Ensemble regressor. Returns the regressed values for each input in N.

All args with nodes_ are fields of a tuple of tree nodes, and it is assumed they are the same length, and an index i will decode the tuple across these inputs. Each node id can appear only once for each tree id.

All fields prefixed with target_ are tuples of votes at the leaves.

A leaf may have multiple votes, where each vote is weighted by the associated target_weights index.

All fields ending with _as_tensor can be used instead of the same parameter without the suffix if the element type is double and not float. All trees must have their node ids start at 0 and increment by 1.

Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF

Attributes

  • aggregate_function: Defines how to aggregate leaf values within a target. One of ‘AVERAGE,’ ‘SUM,’ ‘MIN,’ ‘MAX.’ Default value is name: "aggregate_function" s: "SUM" type: STRING
  • post_transform: Indicates the transform to apply to the score. One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’ Default value is name: "post_transform" s: "NONE" type: STRING

Inputs

  • X (heterogeneous)T: Input of shape [N,F]

Outputs

  • Y (heterogeneous)tensor(float): N classes

Type Constraints

  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type.
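The regressor counterpart appears the same way when a scikit-learn tree regressor is converted; a short hedged sketch along the same lines:

    import numpy as np
    from sklearn.datasets import load_diabetes
    from sklearn.tree import DecisionTreeRegressor
    from skl2onnx import to_onnx

    X, y = load_diabetes(return_X_y=True)
    reg = DecisionTreeRegressor(max_depth=3).fit(X, y)

    onx = to_onnx(reg, X[:1].astype(np.float32))
    # The op types are expected to include 'TreeEnsembleRegressor'.
    print([n.op_type for n in onx.graph.node])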

OnnxTreeEnsembleRegressor_1#

class skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleRegressor_1(*args, **kwargs)#

Version

Onnx name: TreeEnsembleRegressor

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Tree Ensemble regressor. Returns the regressed values for each input in N.

All args with nodes_ are fields of a tuple of tree nodes, and it is assumed they are the same length, and an index i will decode the tuple across these inputs. Each node id can appear only once for each tree id.

All fields prefixed with target_ are tuples of votes at the leaves.

A leaf may have multiple votes, where each vote is weighted by the associated target_weights index.

All trees must have their node ids start at 0 and increment by 1.

Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF

Attributes

  • aggregate_function: Defines how to aggregate leaf values within a target. One of ‘AVERAGE,’ ‘SUM,’ ‘MIN,’ ‘MAX.’ Default value is name: "aggregate_function" s: "SUM" type: STRING
  • post_transform: Indicates the transform to apply to the score. One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’ Default value is name: "post_transform" s: "NONE" type: STRING

Inputs

  • X (heterogeneous)T: Input of shape [N,F]

Outputs

  • Y (heterogeneous)tensor(float): N classes

Type Constraints

  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type.

OnnxTreeEnsembleRegressor_3#

class skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleRegressor_3(*args, **kwargs)#

Version

Onnx name: TreeEnsembleRegressor

This version of the operator has been available since version 3 of domain ai.onnx.ml.

Summary

Tree Ensemble regressor. Returns the regressed values for each input in N.

All args with nodes_ are fields of a tuple of tree nodes, and it is assumed they are the same length, and an index i will decode the tuple across these inputs. Each node id can appear only once for each tree id.

All fields prefixed with target_ are tuples of votes at the leaves.

A leaf may have multiple votes, where each vote is weighted by the associated target_weights index.

All fields ending with _as_tensor can be used instead of the same parameter without the suffix if the element type is double and not float. All trees must have their node ids start at 0 and increment by 1.

Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF

Attributes

  • aggregate_function: Defines how to aggregate leaf values within a target. One of ‘AVERAGE,’ ‘SUM,’ ‘MIN,’ ‘MAX.’ Default value is name: "aggregate_function" s: "SUM" type: STRING
  • post_transform: Indicates the transform to apply to the score. One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’ Default value is name: "post_transform" s: "NONE" type: STRING

Inputs

  • X (heterogeneous)T: Input of shape [N,F]

Outputs

  • Y (heterogeneous)tensor(float): N classes

Type Constraints

  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type.

OnnxTrilu#

class skl2onnx.algebra.onnx_ops.OnnxTrilu(*args, **kwargs)#

Version

Onnx name: Trilu

This version of the operator has been available since version 14.

Summary

Given a 2-D matrix or batches of 2-D matrices, returns the upper or lower triangular part of the tensor(s). The attribute “upper” determines whether the upper or lower part is retained. If set to true, the upper triangular matrix is retained. Lower triangular matrix is retained otherwise. Default value for the “upper” attribute is true.

Trilu takes one input tensor of shape [*, N, M], where * is zero or more batch dimensions. The upper triangular part consists of the elements on and above the given diagonal (k). The lower triangular part consists of elements on and below the diagonal. All other elements in the matrix are set to zero. If k = 0, the triangular part on and above/below the main diagonal is retained.

If upper is set to true, a positive k retains the upper triangular matrix excluding the main diagonal and (k-1) diagonals above it. A negative k value retains the main diagonal and |k| diagonals below it. If upper is set to false, a positive k retains the lower triangular matrix including the main diagonal and k diagonals above it. A negative k value excludes the main diagonal and (|k|-1) diagonals below it.

Attributes

  • upper: Boolean. Indicates whether upper or lower part of matrix is retained. Default is true. Default value is name: "upper" i: 1 type: INT

Inputs

Between 1 and 2 inputs.

  • input (heterogeneous)T: Input tensor of rank 2 or higher.
  • k (optional, heterogeneous)tensor(int64): A 0-D tensor containing a single value corresponding to the number of diagonals above or below the main diagonal to exclude or include. Default value is 0 if it’s not specified.

Outputs

  • output (heterogeneous)T: Output tensor of the same type and shape as the input tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.
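The k/upper semantics described above correspond to numpy.triu and numpy.tril; a small sketch:

    import numpy as np

    x = np.arange(1, 17).reshape(4, 4)

    print(np.triu(x, k=0))   # upper=1, k=0: keep the main diagonal and above
    print(np.triu(x, k=1))   # positive k excludes the main diagonal and (k-1) diagonals above it
    print(np.tril(x, k=-1))  # upper=0, negative k excludes the main diagonal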

OnnxTrilu_14#

class skl2onnx.algebra.onnx_ops.OnnxTrilu_14(*args, **kwargs)#

Version

Onnx name: Trilu

This version of the operator has been available since version 14.

Summary

Given a 2-D matrix or batches of 2-D matrices, returns the upper or lower triangular part of the tensor(s). The attribute “upper” determines whether the upper or lower part is retained. If set to true, the upper triangular matrix is retained. Lower triangular matrix is retained otherwise. Default value for the “upper” attribute is true.

Trilu takes one input tensor of shape [*, N, M], where * is zero or more batch dimensions. The upper triangular part consists of the elements on and above the given diagonal (k). The lower triangular part consists of elements on and below the diagonal. All other elements in the matrix are set to zero. If k = 0, the triangular part on and above/below the main diagonal is retained.

If upper is set to true, a positive k retains the upper triangular matrix excluding the main diagonal and (k-1) diagonals above it. A negative k value retains the main diagonal and |k| diagonals below it. If upper is set to false, a positive k retains the lower triangular matrix including the main diagonal and k diagonals above it. A negative k value excludes the main diagonal and (|k|-1) diagonals below it.

Attributes

  • upper: Boolean. Indicates whether upper or lower part of matrix is retained. Default is true. Default value is name: "upper" i: 1 type: INT

Inputs

Between 1 and 2 inputs.

  • input (heterogeneous)T: Input tensor of rank 2 or higher.
  • k (optional, heterogeneous)tensor(int64): A 0-D tensor containing a single value corresponding to the number of diagonals above or below the main diagonal to exclude or include. Default value is 0 if it’s not specified.

Outputs

  • output (heterogeneous)T: Output tensor of the same type and shape as the input tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxUnique#

class skl2onnx.algebra.onnx_ops.OnnxUnique(*args, **kwargs)#

Version

Onnx name: Unique

This version of the operator has been available since version 11.

Summary

Find the unique elements of a tensor. When an optional attribute ‘axis’ is provided, unique subtensors sliced along the ‘axis’ are returned. Otherwise the input tensor is flattened and unique values of the flattened tensor are returned.

This operator returns the unique values or sliced unique subtensors of the input tensor and three optional outputs. The first output tensor ‘Y’ contains all unique values or subtensors of the input. The second optional output tensor ‘indices’ contains indices of ‘Y’ elements’ first occurrence in ‘X’. The third optional output tensor ‘inverse_indices’ contains, for elements of ‘X’, its corresponding indices in ‘Y’. The fourth optional output tensor ‘counts’ contains the count of each element of ‘Y’ in the input.

Outputs are either sorted in ascending order or optionally in the order of the first occurrence of the values in the input.

https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html

Example 1:
  input_X = [2, 1, 1, 3, 4, 3]
  attribute_sorted = 0
  attribute_axis = None
  output_Y = [2, 1, 3, 4]
  output_indices = [0, 1, 3, 4]
  output_inverse_indices = [0, 1, 1, 2, 3, 2]
  output_counts = [1, 2, 2, 1]

Example 2:
  input_X = [[1, 3], [2, 3]]
  attribute_sorted = 1
  attribute_axis = None
  output_Y = [1, 2, 3]
  output_indices = [0, 2, 1]
  output_inverse_indices = [0, 2, 1, 2]
  output_counts = [1, 1, 2]

Example 3:
  input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]
  attribute_sorted = 1
  attribute_axis = 0
  output_Y = [[1, 0, 0], [2, 3, 4]]
  output_indices = [0, 2]
  output_inverse_indices = [0, 0, 1]
  output_counts = [2, 1]

Example 4:
  input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
             [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]
  attribute_sorted = 1
  attribute_axis = 1

  Intermediate data are presented below for better understanding.

  There are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):
  A: [[1, 1], [1, 1]],
     [[0, 1], [0, 1]],
     [[2, 1], [2, 1]],
     [[0, 1], [0, 1]].

  There are 3 unique subtensors:
     [[1, 1], [1, 1]],
     [[0, 1], [0, 1]],
     [[2, 1], [2, 1]].

  Sorted unique subtensors:
  B: [[0, 1], [0, 1]],
     [[1, 1], [1, 1]],
     [[2, 1], [2, 1]].

  output_Y is constructed from B:
  [[[0. 1.], [1. 1.], [2. 1.]],
   [[0. 1.], [1. 1.], [2. 1.]]]

  output_indices maps from B to A: [1, 0, 2]
  output_inverse_indices maps from A to B: [1, 0, 2, 0]
  output_counts = [2, 1, 1]

Attributes

  • sorted: (Optional) Whether to sort the unique elements in ascending order before returning as output. Must be one of 0, or 1 (default). Default value is name: "sorted" i: 1 type: INT

Inputs

  • X (heterogeneous)T: A N-D input tensor that is to be processed.

Outputs

Between 1 and 4 outputs.

  • Y (heterogeneous)T: A tensor of the same type as ‘X’ containing all the unique values or subtensors sliced along a provided ‘axis’ in ‘X’, either sorted or maintained in the same order they occur in input ‘X’
  • indices (optional, heterogeneous)tensor(int64): A 1-D INT64 tensor containing indices of ‘Y’ elements’ first occurrence in ‘X’. When ‘axis’ is provided, it contains indices to subtensors in input ‘X’ on the ‘axis’. When ‘axis’ is not provided, it contains indices to values in the flattened input tensor.
  • inverse_indices (optional, heterogeneous)tensor(int64): A 1-D INT64 tensor containing, for elements of ‘X’, its corresponding indices in ‘Y’. When ‘axis’ is provided, it contains indices to subtensors in output ‘Y’ on the ‘axis’. When ‘axis’ is not provided, it contains indices to values in output ‘Y’.
  • counts (optional, heterogeneous)tensor(int64): A 1-D INT64 tensor containing the count of each element of ‘Y’ in input ‘X’

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input can be of any tensor type.
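Example 2 above (sorted = 1, no axis) matches numpy.unique on the flattened input:

    import numpy as np

    X = np.array([[1, 3], [2, 3]])
    Y, indices, inverse, counts = np.unique(
        X.ravel(), return_index=True, return_inverse=True, return_counts=True)
    print(Y)        # [1 2 3]
    print(indices)  # [0 2 1]
    print(inverse)  # [0 2 1 2]
    print(counts)   # [1 1 2]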

OnnxUnique_11#

class skl2onnx.algebra.onnx_ops.OnnxUnique_11(*args, **kwargs)#

Version

Onnx name: Unique

This version of the operator has been available since version 11.

Summary

Find the unique elements of a tensor. When an optional attribute ‘axis’ is provided, unique subtensors sliced along the ‘axis’ are returned. Otherwise the input tensor is flattened and unique values of the flattened tensor are returned.

This operator returns the unique values or sliced unique subtensors of the input tensor and three optional outputs. The first output tensor ‘Y’ contains all unique values or subtensors of the input. The second optional output tensor ‘indices’ contains indices of ‘Y’ elements’ first occurrence in ‘X’. The third optional output tensor ‘inverse_indices’ contains, for elements of ‘X’, its corresponding indices in ‘Y’. The fourth optional output tensor ‘counts’ contains the count of each element of ‘Y’ in the input.

Outputs are either sorted in ascending order or optionally in the order of the first occurrence of the values in the input.

https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html

Example 1:
  input_X = [2, 1, 1, 3, 4, 3]
  attribute_sorted = 0
  attribute_axis = None
  output_Y = [2, 1, 3, 4]
  output_indices = [0, 1, 3, 4]
  output_inverse_indices = [0, 1, 1, 2, 3, 2]
  output_counts = [1, 2, 2, 1]

Example 2:
  input_X = [[1, 3], [2, 3]]
  attribute_sorted = 1
  attribute_axis = None
  output_Y = [1, 2, 3]
  output_indices = [0, 2, 1]
  output_inverse_indices = [0, 2, 1, 2]
  output_counts = [1, 1, 2]

Example 3:
  input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]
  attribute_sorted = 1
  attribute_axis = 0
  output_Y = [[1, 0, 0], [2, 3, 4]]
  output_indices = [0, 2]
  output_inverse_indices = [0, 0, 1]
  output_counts = [2, 1]

Example 4:
  input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
             [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]
  attribute_sorted = 1
  attribute_axis = 1

  Intermediate data are presented below for better understanding.

  There are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):
  A: [[1, 1], [1, 1]],
     [[0, 1], [0, 1]],
     [[2, 1], [2, 1]],
     [[0, 1], [0, 1]].

  There are 3 unique subtensors:
     [[1, 1], [1, 1]],
     [[0, 1], [0, 1]],
     [[2, 1], [2, 1]].

  Sorted unique subtensors:
  B: [[0, 1], [0, 1]],
     [[1, 1], [1, 1]],
     [[2, 1], [2, 1]].

  output_Y is constructed from B:
  [[[0. 1.], [1. 1.], [2. 1.]],
   [[0. 1.], [1. 1.], [2. 1.]]]

  output_indices maps from B to A: [1, 0, 2]
  output_inverse_indices maps from A to B: [1, 0, 2, 0]
  output_counts = [2, 1, 1]

Attributes

  • sorted: (Optional) Whether to sort the unique elements in ascending order before returning as output. Must be one of 0, or 1 (default). Default value is name: "sorted" i: 1 type: INT

Inputs

  • X (heterogeneous)T: A N-D input tensor that is to be processed.

Outputs

Between 1 and 4 outputs.

  • Y (heterogeneous)T: A tensor of the same type as ‘X’ containing all the unique values or subtensors sliced along a provided ‘axis’ in ‘X’, either sorted or maintained in the same order they occur in input ‘X’
  • indices (optional, heterogeneous)tensor(int64): A 1-D INT64 tensor containing indices of ‘Y’ elements’ first occurrence in ‘X’. When ‘axis’ is provided, it contains indices to subtensors in input ‘X’ on the ‘axis’. When ‘axis’ is not provided, it contains indices to values in the flattened input tensor.
  • inverse_indices (optional, heterogeneous)tensor(int64): A 1-D INT64 tensor containing, for elements of ‘X’, its corresponding indices in ‘Y’. When ‘axis’ is provided, it contains indices to subtensors in output ‘Y’ on the ‘axis’. When ‘axis’ is not provided, it contains indices to values in output ‘Y’.
  • counts (optional, heterogeneous)tensor(int64): A 1-D INT64 tensor containing the count of each element of ‘Y’ in input ‘X’

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input can be of any tensor type.

OnnxUnsqueeze#

class skl2onnx.algebra.onnx_ops.OnnxUnsqueeze(*args, **kwargs)#

Version

Onnx name: Unsqueeze

This version of the operator has been available since version 13.

Summary

Insert single-dimensional entries to the shape of an input tensor (data). Takes one required input axes - which contains a list of dimension indices and this operator will insert a dimension of value 1 into the corresponding index of the output tensor (expanded).

For example: Given an input tensor (data) of shape [3, 4, 5], then Unsqueeze(data, axes=[0, 4]) outputs a tensor (expanded) containing same data as data but with shape [1, 3, 4, 5, 1].

The input axes should not contain any duplicate entries. It is an error if it contains duplicates. The rank of the output tensor (output_rank) is the rank of the input tensor (data) plus the number of values in axes. Each value in axes should be within the (inclusive) range [-output_rank , output_rank - 1]. The order of values in axes does not matter and can come in any order.

Inputs

  • data (heterogeneous)T: Original tensor
  • axes (heterogeneous)tensor(int64): List of integers indicating the dimensions to be inserted. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(expanded).

Outputs

  • expanded (heterogeneous)T: Reshaped tensor with same data as input.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.
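The shape example from the summary, mirrored with numpy (expand_dims accepts a tuple of axes in recent numpy versions):

    import numpy as np

    data = np.zeros((3, 4, 5), dtype=np.float32)
    expanded = np.expand_dims(data, axis=(0, 4))
    print(expanded.shape)  # (1, 3, 4, 5, 1)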

OnnxUnsqueeze_1#

class skl2onnx.algebra.onnx_ops.OnnxUnsqueeze_1(*args, **kwargs)#

Version

Onnx name: Unsqueeze

This version of the operator has been available since version 1.

Summary

Insert single-dimensional entries to the shape of a tensor. Takes one required argument axes, a list of dimensions that will be inserted. Dimension indices in axes are as seen in the output tensor. For example: Given a tensor with shape [3, 4, 5], then Unsqueeze(tensor, axes=[0, 4]) has shape [1, 3, 4, 5, 1].

Inputs

  • data (heterogeneous)T: Original tensor

Outputs

  • expanded (heterogeneous)T: Reshaped tensor with same data as input.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxUnsqueeze_11#

class skl2onnx.algebra.onnx_ops.OnnxUnsqueeze_11(*args, **kwargs)#

Version

Onnx name: Unsqueeze

This version of the operator has been available since version 11.

Summary

Insert single-dimensional entries to the shape of an input tensor (data). Takes one required argument axes - which contains a list of dimension indices and this operator will insert a dimension of value 1 into the corresponding index of the output tensor (expanded).

For example: Given an input tensor (data) of shape [3, 4, 5], then Unsqueeze(data, axes=[0, 4]) outputs a tensor (expanded) containing same data as data but with shape [1, 3, 4, 5, 1].

The attribute axes should not contain any duplicate entries. It is an error if it contains duplicates. The rank of the output tensor (output_rank) is the rank of the input tensor (data) plus the number of values in axes. Each value in axes should be within the (inclusive) range [-output_rank , output_rank - 1]. The order of values in axes does not matter and can come in any order.

Inputs

  • data (heterogeneous)T: Original tensor

Outputs

  • expanded (heterogeneous)T: Reshaped tensor with same data as input.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxUnsqueeze_13#

class skl2onnx.algebra.onnx_ops.OnnxUnsqueeze_13(*args, **kwargs)#

Version

Onnx name: Unsqueeze

This version of the operator has been available since version 13.

Summary

Insert single-dimensional entries into the shape of an input tensor (data). Takes one required input axes, which contains a list of dimension indices; the operator inserts a dimension of value 1 at each corresponding index of the output tensor (expanded).

For example: given an input tensor (data) of shape [3, 4, 5], Unsqueeze(data, axes=[0, 4]) outputs a tensor (expanded) containing the same data as data but with shape [1, 3, 4, 5, 1].

The input axes should not contain any duplicate entries; it is an error if it does. The rank of the output tensor (output_rank) is the rank of the input tensor (data) plus the number of values in axes. Each value in axes should be within the (inclusive) range [-output_rank, output_rank - 1]. The values in axes can come in any order.

Inputs

  • data (heterogeneous)T: Original tensor

  • axes (heterogeneous)tensor(int64): List of integers indicating the dimensions to be inserted. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(expanded).

Outputs

  • expanded (heterogeneous)T: Reshaped tensor with same data as input.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.
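
The shape rule above can be checked with plain NumPy (this sketch only illustrates the operator's semantics; it does not use the converter API):

    import numpy as np

    data = np.zeros((3, 4, 5), dtype=np.float32)
    axes = [0, 4]  # dimension indices as seen in the output tensor

    # Equivalent of Unsqueeze(data, axes) for non-negative axes:
    # insert a size-1 dimension at each requested index, in ascending order.
    expanded = data
    for axis in sorted(axes):
        expanded = np.expand_dims(expanded, axis)

    print(expanded.shape)  # (1, 3, 4, 5, 1)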

OnnxUpsample#

class skl2onnx.algebra.onnx_ops.OnnxUpsample(*args, **kwargs)#

Version

Onnx name: Upsample

This version of the operator has been deprecated since version 10.

Summary

Upsample the input tensor. Each dimension value of the output tensor is:

output_dimension = floor(input_dimension * scale).

Attributes

  • mode: Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc.). Default value is name: "mode" s: "nearest" type: STRING

Inputs

  • X (heterogeneous)T: N-D tensor

  • scales (heterogeneous)tensor(float): The scale array along each dimension. It takes values greater than or equal to 1. The number of elements of ‘scales’ should be the same as the rank of input ‘X’.

Outputs

  • Y (heterogeneous)T: N-D tensor after resizing

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.
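
The dimension rule is purely arithmetic; a small NumPy sketch (not the converter API) of how the output shape is obtained:

    import numpy as np

    input_shape = np.array([1, 3, 4, 5])
    scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)  # one scale per dimension, each >= 1

    # output_dimension = floor(input_dimension * scale)
    output_shape = np.floor(input_shape * scales).astype(np.int64)
    print(output_shape)  # [ 1  3  8 15]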

OnnxUpsample_10#

class skl2onnx.algebra.onnx_ops.OnnxUpsample_10(*args, **kwargs)#

Version

Onnx name: Upsample

This version of the operator has been deprecated since version 10.

Summary

Upsample the input tensor. Each dimension value of the output tensor is:

output_dimension = floor(input_dimension * scale).

Attributes

  • mode: Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc.). Default value is name: "mode" s: "nearest" type: STRING

Inputs

  • X (heterogeneous)T: N-D tensor

  • scales (heterogeneous)tensor(float): The scale array along each dimension. It takes values greater than or equal to 1. The number of elements of ‘scales’ should be the same as the rank of input ‘X’.

Outputs

  • Y (heterogeneous)T: N-D tensor after resizing

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

OnnxUpsample_7#

class skl2onnx.algebra.onnx_ops.OnnxUpsample_7(*args, **kwargs)#

Version

Onnx name: Upsample

This version of the operator has been available since version 7.

Summary

Upsample the input tensor. Each dimension value of the output tensor is:

output_dimension = floor(input_dimension * scale).

Attributes

  • mode: Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc.). Default value is name: "mode" s: "nearest" type: STRING

Inputs

  • X (heterogeneous)T: N-D tensor

Outputs

  • Y (heterogeneous)T: N-D tensor after resizing

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxUpsample_9#

class skl2onnx.algebra.onnx_ops.OnnxUpsample_9(*args, **kwargs)#

Version

Onnx name: Upsample

This version of the operator has been available since version 9.

Summary

Upsample the input tensor. Each dimension value of the output tensor is:

output_dimension = floor(input_dimension * scale).

Attributes

  • mode: Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc.). Default value is name: "mode" s: "nearest" type: STRING

Inputs

  • X (heterogeneous)T: N-D tensor

  • scales (heterogeneous)tensor(float): The scale array along each dimension. It takes values greater than or equal to 1. The number of elements of ‘scales’ should be the same as the rank of input ‘X’.

Outputs

  • Y (heterogeneous)T: N-D tensor after resizing

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

OnnxWhere#

class skl2onnx.algebra.onnx_ops.OnnxWhere(*args, **kwargs)#

Version

Onnx name: Where

This version of the operator has been available since version 16.

Summary

Return elements, either from X or Y, depending on condition. Where behaves like numpy.where (https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html) with three parameters.

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

History: Version 16 adds bfloat16 to the types allowed (for the second and third parameter).

Inputs

  • condition (heterogeneous)B: When True (nonzero), yield X, otherwise yield Y

  • X (heterogeneous)T: values selected at indices where condition is True

  • Y (heterogeneous)T: values selected at indices where condition is False

Outputs

  • output (heterogeneous)T: Tensor of shape equal to the broadcasted shape of condition, X, and Y.

Type Constraints

  • B tensor(bool): Constrain to boolean tensors.

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types (including bfloat).
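
Because the operator is defined to behave like numpy.where, its semantics (including multidirectional broadcasting) can be illustrated directly with NumPy; this is only a semantic sketch, not the converter API:

    import numpy as np

    condition = np.array([[True, False], [False, True]])
    x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
    y = np.zeros((1, 2), dtype=np.float32)  # broadcast against condition and x

    # output[i, j] = x[i, j] if condition[i, j] else y[i, j]
    output = np.where(condition, x, y)
    print(output)
    # [[1. 0.]
    #  [0. 4.]]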

OnnxWhere_16#

class skl2onnx.algebra.onnx_ops.OnnxWhere_16(*args, **kwargs)#

Version

Onnx name: Where

This version of the operator has been available since version 16.

Summary

Return elements, either from X or Y, depending on condition. Where behaves like numpy.where (https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html) with three parameters.

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

History: Version 16 adds bfloat16 to the types allowed (for the second and third parameter).

Inputs

  • condition (heterogeneous)B: When True (nonzero), yield X, otherwise yield Y

  • X (heterogeneous)T: values selected at indices where condition is True

  • Y (heterogeneous)T: values selected at indices where condition is False

Outputs

  • output (heterogeneous)T: Tensor of shape equal to the broadcasted shape of condition, X, and Y.

Type Constraints

  • B tensor(bool): Constrain to boolean tensors.

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types (including bfloat).

OnnxWhere_9#

class skl2onnx.algebra.onnx_ops.OnnxWhere_9(*args, **kwargs)#

Version

Onnx name: Where

This version of the operator has been available since version 9.

Summary

Return elements, either from X or Y, depending on condition. Where behaves like numpy.where (https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html) with three parameters.

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • condition (heterogeneous)B: When True (nonzero), yield X, otherwise yield Y

  • X (heterogeneous)T: values selected at indices where condition is True

  • Y (heterogeneous)T: values selected at indices where condition is False

Outputs

  • output (heterogeneous)T: Tensor of shape equal to the broadcasted shape of condition, X, and Y.

Type Constraints

  • B tensor(bool): Constrain to boolean tensors.

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

OnnxXor#

class skl2onnx.algebra.onnx_ops.OnnxXor(*args, **kwargs)#

Version

Onnx name: Xor

This version of the operator has been available since version 7.

Summary

Returns the tensor resulting from performing the xor logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous)T: First input operand for the logical operator.

  • B (heterogeneous)T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous)T1: Result tensor.

Type Constraints

  • T tensor(bool): Constrains input to boolean tensor.

  • T1 tensor(bool): Constrains output to boolean tensor.
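
The broadcasting behaviour matches NumPy's; a plain NumPy sketch of the semantics (not the converter API):

    import numpy as np

    a = np.array([[True, False, True]])   # shape (1, 3)
    b = np.array([[True], [False]])       # shape (2, 1)

    # Xor(A, B) with multidirectional broadcasting -> result of shape (2, 3)
    c = np.logical_xor(a, b)
    print(c)
    # [[False  True False]
    #  [ True False  True]]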

OnnxXor_1#

class skl2onnx.algebra.onnx_ops.OnnxXor_1(*args, **kwargs)#

Version

Onnx name: Xor

This version of the operator has been available since version 1.

Summary

Returns the tensor resulting from performing the xor logical operation elementwise on the input tensors A and B.

If broadcasting is enabled, the right-hand-side argument will be broadcasted to match the shape of the left-hand-side argument. See the doc of Add for a detailed description of the broadcasting rules.

Attributes

  • broadcast: Enable broadcasting. Default value is name: "broadcast" i: 0 type: INT

Inputs

  • A (heterogeneous)T: Left input tensor for the logical operator.

  • B (heterogeneous)T: Right input tensor for the logical operator.

Outputs

  • C (heterogeneous)T1: Result tensor.

Type Constraints

  • T tensor(bool): Constrains input to boolean tensor.

  • T1 tensor(bool): Constrains output to boolean tensor.

OnnxXor_7#

class skl2onnx.algebra.onnx_ops.OnnxXor_7(*args, **kwargs)#

Version

Onnx name: Xor

This version of the operator has been available since version 7.

Summary

Returns the tensor resulting from performing the xor logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous)T: First input operand for the logical operator.

  • B (heterogeneous)T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous)T1: Result tensor.

Type Constraints

  • T tensor(bool): Constrains input to boolean tensor.

  • T1 tensor(bool): Constrains output to boolean tensor.

OnnxZipMap#

class skl2onnx.algebra.onnx_ops.OnnxZipMap(*args, **kwargs)#

Version

Onnx name: ZipMap

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Creates a map from the input and the attributes.

The values are provided by the input tensor, while the keys are specified by the attributes. Keys must be provided in either classlabels_strings or classlabels_int64s (but not both).

The columns of the tensor correspond one-by-one to the keys specified by the attributes. There must be as many columns as keys.

Attributes

Inputs

  • X (heterogeneous)tensor(float): The input values

Outputs

  • Z (heterogeneous)T: The output map

Type Constraints

  • T seq(map(string, float)), seq(map(int64, float)): The output will be a sequence of string or integer maps to float.
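
In converted classifiers this operator is what turns the raw probability matrix into the list of dictionaries returned by onnxruntime. A plain Python sketch of the mapping it performs (the class labels below are made-up examples, not values taken from this documentation):

    import numpy as np

    probabilities = np.array([[0.2, 0.8],
                              [0.9, 0.1]], dtype=np.float32)  # one row per sample, one column per key
    classlabels_int64s = [0, 1]  # keys, supplied through the attributes

    # ZipMap pairs each column with its key, producing one map per row.
    zipmap_output = [dict(zip(classlabels_int64s, row.tolist())) for row in probabilities]
    print(zipmap_output)  # [{0: 0.2..., 1: 0.8...}, {0: 0.9..., 1: 0.1...}]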

OnnxZipMap_1#

class skl2onnx.algebra.onnx_ops.OnnxZipMap_1(*args, **kwargs)#

Version

Onnx name: ZipMap

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Creates a map from the input and the attributes.

The values are provided by the input tensor, while the keys are specified by the attributes. Keys must be provided in either classlabels_strings or classlabels_int64s (but not both).

The columns of the tensor correspond one-by-one to the keys specified by the attributes. There must be as many columns as keys.

Attributes

Inputs

  • X (heterogeneous)tensor(float): The input values

Outputs

  • Z (heterogeneous)T: The output map

Type Constraints

  • T seq(map(string, float)), seq(map(int64, float)): The output will be a sequence of string or integer maps to float.

Supported scikit-learn Models#

skl2onnx can currently convert the following list of models. They were tested using onnxruntime. All the following classes overload the same methods as OnnxSklearnPipeline does: they wrap existing scikit-learn classes by dynamically creating a new one which inherits from OnnxOperatorMixin, which implements the to_onnx methods.
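
For example, a fitted pipeline built from models in the list below can be converted with skl2onnx.to_onnx and executed with onnxruntime. A minimal sketch (the estimator, dataset and session options here are arbitrary illustrative choices):

    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler
    from onnxruntime import InferenceSession
    from skl2onnx import to_onnx

    X, y = load_iris(return_X_y=True)
    X = X.astype(np.float32)
    model = make_pipeline(StandardScaler(), LogisticRegression(max_iter=500)).fit(X, y)

    # A sample of the training data is enough for to_onnx to infer the input type.
    onx = to_onnx(model, X[:1])

    sess = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
    predicted_labels = sess.run(None, {"X": X[:5]})[0]
    print(predicted_labels)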

Covered Converters#


Name

Package

Supported

ARDRegression

linear_model

Yes

AdaBoostClassifier

ensemble

Yes

AdaBoostRegressor

ensemble

Yes

AdditiveChi2Sampler

kernel_approximation

AffinityPropagation

cluster

AgglomerativeClustering

cluster

BaggingClassifier

ensemble

Yes

BaggingRegressor

ensemble

Yes

BaseDecisionTree

tree

BaseEnsemble

ensemble

BayesianGaussianMixture

mixture

Yes

BayesianRidge

linear_model

Yes

BernoulliNB

naive_bayes

Yes

BernoulliRBM

neural_network

Binarizer

preprocessing

Yes

Birch

cluster

BisectingKMeans

cluster

CCA

cross_decomposition

CalibratedClassifierCV

calibration

Yes

CategoricalNB

naive_bayes

Yes

ClassifierChain

multioutput

ComplementNB

naive_bayes

Yes

DBSCAN

cluster

DecisionTreeClassifier

tree

Yes

DecisionTreeRegressor

tree

Yes

DictVectorizer

feature_extraction

Yes

DictionaryLearning

decomposition

ElasticNet

linear_model

Yes

ElasticNetCV

linear_model

Yes

EllipticEnvelope

covariance

EmpiricalCovariance

covariance

ExtraTreeClassifier

tree

Yes

ExtraTreeRegressor

tree

Yes

ExtraTreesClassifier

ensemble

Yes

ExtraTreesRegressor

ensemble

Yes

FactorAnalysis

decomposition

FastICA

decomposition

FeatureAgglomeration

cluster

FeatureHasher

feature_extraction

Yes

FunctionTransformer

preprocessing

Yes

GammaRegressor

linear_model

Yes

GaussianMixture

mixture

Yes

GaussianNB

naive_bayes

Yes

GaussianProcessClassifier

gaussian_process

Yes

GaussianProcessRegressor

gaussian_process

Yes

GaussianRandomProjection

random_projection

Yes

GenericUnivariateSelect

feature_selection

Yes

GradientBoostingClassifier

ensemble

Yes

GradientBoostingRegressor

ensemble

Yes

GraphicalLasso

covariance

GraphicalLassoCV

covariance

GridSearchCV

model_selection

Yes

HistGradientBoostingClassifier

ensemble

Yes

HistGradientBoostingRegressor

ensemble

Yes

HuberRegressor

linear_model

Yes

IncrementalPCA

decomposition

Yes

IsolationForest

ensemble

Yes

IsotonicRegression

isotonic

KBinsDiscretizer

preprocessing

Yes

KMeans

cluster

Yes

KNNImputer

impute

Yes

KNeighborsClassifier

neighbors

Yes

KNeighborsRegressor

neighbors

Yes

KNeighborsTransformer

neighbors

Yes

KernelCenterer

preprocessing

Yes

KernelDensity

neighbors

KernelPCA

decomposition

Yes

KernelRidge

kernel_ridge

LabelBinarizer

preprocessing

Yes

LabelEncoder

preprocessing

Yes

LabelPropagation

semi_supervised

LabelSpreading

semi_supervised

Lars

linear_model

Yes

LarsCV

linear_model

Yes

Lasso

linear_model

Yes

LassoCV

linear_model

Yes

LassoLars

linear_model

Yes

LassoLarsCV

linear_model

Yes

LassoLarsIC

linear_model

Yes

LatentDirichletAllocation

decomposition

LedoitWolf

covariance

LinearDiscriminantAnalysis

discriminant_analysis

Yes

LinearRegression

linear_model

Yes

LinearSVC

svm

Yes

LinearSVR

svm

Yes

LocalOutlierFactor

neighbors

Yes

LogisticRegression

linear_model

Yes

LogisticRegressionCV

linear_model

Yes

MLPClassifier

neural_network

Yes

MLPRegressor

neural_network

Yes

MaxAbsScaler

preprocessing

Yes

MeanShift

cluster

MinCovDet

covariance

MinMaxScaler

preprocessing

Yes

MiniBatchDictionaryLearning

decomposition

MiniBatchKMeans

cluster

Yes

MiniBatchNMF

decomposition

MiniBatchSparsePCA

decomposition

MissingIndicator

impute

MultiLabelBinarizer

preprocessing

MultiOutputClassifier

multioutput

Yes

MultiOutputRegressor

multioutput

Yes

MultiTaskElasticNet

linear_model

Yes

MultiTaskElasticNetCV

linear_model

Yes

MultiTaskLasso

linear_model

Yes

MultiTaskLassoCV

linear_model

Yes

MultinomialNB

naive_bayes

Yes

NMF

decomposition

NearestCentroid

neighbors

NearestNeighbors

neighbors

Yes

NeighborhoodComponentsAnalysis

neighbors

Yes

Normalizer

preprocessing

Yes

NuSVC

svm

Yes

NuSVR

svm

Yes

Nystroem

kernel_approximation

OAS

covariance

OPTICS

cluster

OneClassSVM

svm

Yes

OneHotEncoder

preprocessing

Yes

OneVsOneClassifier

multiclass

Yes

OneVsRestClassifier

multiclass

Yes

OrdinalEncoder

preprocessing

Yes

OrthogonalMatchingPursuit

linear_model

Yes

OrthogonalMatchingPursuitCV

linear_model

Yes

OutputCodeClassifier

multiclass

PCA

decomposition

Yes

PLSCanonical

cross_decomposition

PLSRegression

cross_decomposition

Yes

PLSSVD

cross_decomposition

PassiveAggressiveClassifier

linear_model

Yes

PassiveAggressiveRegressor

linear_model

Yes

Perceptron

linear_model

Yes

PoissonRegressor

linear_model

Yes

PolynomialCountSketch

kernel_approximation

PolynomialFeatures

preprocessing

Yes

PowerTransformer

preprocessing

Yes

QuadraticDiscriminantAnalysis

discriminant_analysis

Yes

QuantileRegressor

linear_model

Yes

QuantileTransformer

preprocessing

RANSACRegressor

linear_model

Yes

RBFSampler

kernel_approximation

RFE

feature_selection

Yes

RFECV

feature_selection

Yes

RadiusNeighborsClassifier

neighbors

Yes

RadiusNeighborsRegressor

neighbors

Yes

RadiusNeighborsTransformer

neighbors

RandomForestClassifier

ensemble

Yes

RandomForestRegressor

ensemble

Yes

RandomTreesEmbedding

ensemble

Yes

RandomizedSearchCV

model_selection

RegressorChain

multioutput

Ridge

linear_model

Yes

RidgeCV

linear_model

Yes

RidgeClassifier

linear_model

Yes

RidgeClassifierCV

linear_model

Yes

RobustScaler

preprocessing

Yes

SGDClassifier

linear_model

Yes

SGDOneClassSVM

linear_model

Yes

SGDRegressor

linear_model

Yes

SVC

svm

Yes

SVR

svm

Yes

SelectFdr

feature_selection

Yes

SelectFpr

feature_selection

Yes

SelectFromModel

feature_selection

Yes

SelectFwe

feature_selection

Yes

SelectKBest

feature_selection

Yes

SelectPercentile

feature_selection

Yes

SelfTrainingClassifier

semi_supervised

SequentialFeatureSelector

feature_selection

ShrunkCovariance

covariance

SimpleImputer

impute

Yes

SkewedChi2Sampler

kernel_approximation

SparseCoder

decomposition

SparsePCA

decomposition

SparseRandomProjection

random_projection

SpectralBiclustering

cluster

SpectralClustering

cluster

SpectralCoclustering

cluster

SplineTransformer

preprocessing

StackingClassifier

ensemble

Yes

StackingRegressor

ensemble

Yes

StandardScaler

preprocessing

Yes

TheilSenRegressor

linear_model

Yes

TransformedTargetRegressor

compose

TruncatedSVD

decomposition

Yes

TweedieRegressor

linear_model

Yes

VarianceThreshold

feature_selection

Yes

VotingClassifier

ensemble

Yes

VotingRegressor

ensemble

Yes


scikit-learn’s version is 1.3.dev0. 130/189 models are covered.


Converters Documentation#


OnnxBooster

OnnxSklearnKMeans

OnnxSklearnPipeline

OnnxCastRegressor

OnnxSklearnKNNImputer

OnnxSklearnPoissonRegressor

OnnxCastTransformer

OnnxSklearnKNeighborsClassifier

OnnxSklearnPolynomialFeatures

OnnxCatBoostClassifier

OnnxSklearnKNeighborsRegressor

OnnxSklearnPowerTransformer

OnnxCustomScorerTransform

OnnxSklearnKNeighborsTransformer

OnnxSklearnQuadraticDiscriminantAnalysis

OnnxDecorrelateTransformer

OnnxSklearnKernelCenterer

OnnxSklearnQuantileRegressor

OnnxIForest

OnnxSklearnKernelPCA

OnnxSklearnRANSACRegressor

OnnxLiveDecorrelateTransformer

OnnxSklearnLGBMClassifier

OnnxSklearnRFE

OnnxMockWrappedLightGbmBoosterClassifier

OnnxSklearnLGBMRegressor

OnnxSklearnRFECV

OnnxOrdinalEncoder

OnnxSklearnLabelBinarizer

OnnxSklearnRadiusNeighborsClassifier

OnnxPredictableTSNE

OnnxSklearnLabelEncoder

OnnxSklearnRadiusNeighborsRegressor

OnnxReplaceTransformer

OnnxSklearnLars

OnnxSklearnRandomForestClassifier

OnnxSklearnARDRegression

OnnxSklearnLarsCV

OnnxSklearnRandomForestRegressor

OnnxSklearnAdaBoostClassifier

OnnxSklearnLasso

OnnxSklearnRandomTreesEmbedding

OnnxSklearnAdaBoostRegressor

OnnxSklearnLassoCV

OnnxSklearnRidge

OnnxSklearnBaggingClassifier

OnnxSklearnLassoLars

OnnxSklearnRidgeCV

OnnxSklearnBaggingRegressor

OnnxSklearnLassoLarsCV

OnnxSklearnRidgeClassifier

OnnxSklearnBayesianGaussianMixture

OnnxSklearnLassoLarsIC

OnnxSklearnRidgeClassifierCV

OnnxSklearnBayesianRidge

OnnxSklearnLinearDiscriminantAnalysis

OnnxSklearnRobustScaler

OnnxSklearnBernoulliNB

OnnxSklearnLinearRegression

OnnxSklearnSGDClassifier

OnnxSklearnBinarizer

OnnxSklearnLinearSVC

OnnxSklearnSGDOneClassSVM

OnnxSklearnCalibratedClassifierCV

OnnxSklearnLinearSVR

OnnxSklearnSGDRegressor

OnnxSklearnCategoricalNB

OnnxSklearnLocalOutlierFactor

OnnxSklearnSVC

OnnxSklearnColumnTransformer

OnnxSklearnLogisticRegression

OnnxSklearnSVR

OnnxSklearnComplementNB

OnnxSklearnLogisticRegressionCV

OnnxSklearnSelectFdr

OnnxSklearnCountVectorizer

OnnxSklearnMLPClassifier

OnnxSklearnSelectFpr

OnnxSklearnDecisionTreeClassifier

OnnxSklearnMLPRegressor

OnnxSklearnSelectFromModel

OnnxSklearnDecisionTreeRegressor

OnnxSklearnMaxAbsScaler

OnnxSklearnSelectFwe

OnnxSklearnDictVectorizer

OnnxSklearnMinMaxScaler

OnnxSklearnSelectKBest

OnnxSklearnElasticNet

OnnxSklearnMiniBatchKMeans

OnnxSklearnSelectPercentile

OnnxSklearnElasticNetCV

OnnxSklearnMultiOutputClassifier

OnnxSklearnSimpleImputer

OnnxSklearnExtraTreeClassifier

OnnxSklearnMultiOutputRegressor

OnnxSklearnStackingClassifier

OnnxSklearnExtraTreeRegressor

OnnxSklearnMultiTaskElasticNet

OnnxSklearnStackingRegressor

OnnxSklearnExtraTreesClassifier

OnnxSklearnMultiTaskElasticNetCV

OnnxSklearnStandardScaler

OnnxSklearnExtraTreesRegressor

OnnxSklearnMultiTaskLasso

OnnxSklearnTfidfTransformer

OnnxSklearnFeatureHasher

OnnxSklearnMultiTaskLassoCV

OnnxSklearnTfidfVectorizer

OnnxSklearnFeatureUnion

OnnxSklearnMultinomialNB

OnnxSklearnTheilSenRegressor

OnnxSklearnFunctionTransformer

OnnxSklearnNearestNeighbors

OnnxSklearnTraceableCountVectorizer

OnnxSklearnGammaRegressor

OnnxSklearnNeighborhoodComponentsAnalysis

OnnxSklearnTraceableTfidfVectorizer

OnnxSklearnGaussianMixture

OnnxSklearnNormalizer

OnnxSklearnTransformedTargetRegressor

OnnxSklearnGaussianNB

OnnxSklearnNuSVC

OnnxSklearnTruncatedSVD

OnnxSklearnGaussianProcessClassifier

OnnxSklearnNuSVR

OnnxSklearnTweedieRegressor

OnnxSklearnGaussianProcessRegressor

OnnxSklearnOneClassSVM

OnnxSklearnVarianceThreshold

OnnxSklearnGaussianRandomProjection

OnnxSklearnOneHotEncoder

OnnxSklearnVotingClassifier

OnnxSklearnGenericUnivariateSelect

OnnxSklearnOneVsOneClassifier

OnnxSklearnVotingRegressor

OnnxSklearnGradientBoostingClassifier

OnnxSklearnOneVsRestClassifier

OnnxSklearnXGBClassifier

OnnxSklearnGradientBoostingRegressor

OnnxSklearnOrdinalEncoder

OnnxSklearnXGBRegressor

OnnxSklearnGridSearchCV

OnnxSklearnOrthogonalMatchingPursuit

OnnxTransferTransformer

OnnxSklearnHistGradientBoostingClassifier

OnnxSklearnOrthogonalMatchingPursuitCV

OnnxValidatorClassifier

OnnxSklearnHistGradientBoostingRegressor

OnnxSklearnPCA

OnnxWOEEncoder

OnnxSklearnHuberRegressor

OnnxSklearnPLSRegression

OnnxWOETransformer

OnnxSklearnIncrementalPCA

OnnxSklearnPassiveAggressiveClassifier

OnnxWrappedLightGbmBooster

OnnxSklearnIsolationForest

OnnxSklearnPassiveAggressiveRegressor

OnnxWrappedLightGbmBoosterClassifier

OnnxSklearnKBinsDiscretizer

OnnxSklearnPerceptron

+
+
+
+
+

OnnxBooster#

+
+
+
+
+

OnnxCastRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxCastRegressor(estimator, *, dtype=<class 'numpy.float32'>)#
+

OnnxOperatorMixin for CastRegressor

+
+ +
+
+
+
+

OnnxCastTransformer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxCastTransformer(*, dtype=<class 'numpy.float32'>)#
+

OnnxOperatorMixin for CastTransformer

+
+ +
+
+
+
+

OnnxCatBoostClassifier#

+
+
+
+
+

OnnxCustomScorerTransform#

+
+
+
+
+

OnnxDecorrelateTransformer#

+
+
+
+
+

OnnxIForest#

+
+
+
+
+

OnnxLiveDecorrelateTransformer#

+
+
+
+
+

OnnxMockWrappedLightGbmBoosterClassifier#

+
+
+
+
+

OnnxOrdinalEncoder#

+
+
+
+
+

OnnxPredictableTSNE#

+
+
+
+
+

OnnxReplaceTransformer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxReplaceTransformer(*, from_value=0, to_value=nan, dtype=<class 'numpy.float32'>)#
+

OnnxOperatorMixin for ReplaceTransformer

+
+ +
+
+
+
+

OnnxSklearnARDRegression#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnARDRegression(*, n_iter=300, tol=0.001, alpha_1=1e-06, alpha_2=1e-06, lambda_1=1e-06, lambda_2=1e-06, compute_score=False, threshold_lambda=10000.0, fit_intercept=True, copy_X=True, verbose=False)#
+

OnnxOperatorMixin for ARDRegression

+
+ +
+
+
+
+

OnnxSklearnAdaBoostClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnAdaBoostClassifier(estimator=None, *, n_estimators=50, learning_rate=1.0, algorithm='SAMME.R', random_state=None, base_estimator='deprecated')#
+

OnnxOperatorMixin for AdaBoostClassifier

+
+ +
+
+
+
+

OnnxSklearnAdaBoostRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnAdaBoostRegressor(estimator=None, *, n_estimators=50, learning_rate=1.0, loss='linear', random_state=None, base_estimator='deprecated')#
+

OnnxOperatorMixin for AdaBoostRegressor

+
+ +
+
+
+
+

OnnxSklearnBaggingClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnBaggingClassifier(estimator=None, n_estimators=10, *, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=None, random_state=None, verbose=0, base_estimator='deprecated')#
+

OnnxOperatorMixin for BaggingClassifier

+
+ +
+
+
+
+

OnnxSklearnBaggingRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnBaggingRegressor(estimator=None, n_estimators=10, *, max_samples=1.0, max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=None, random_state=None, verbose=0, base_estimator='deprecated')#
+

OnnxOperatorMixin for BaggingRegressor

+
+ +
+
+
+
+

OnnxSklearnBayesianGaussianMixture#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnBayesianGaussianMixture(*, n_components=1, covariance_type='full', tol=0.001, reg_covar=1e-06, max_iter=100, n_init=1, init_params='kmeans', weight_concentration_prior_type='dirichlet_process', weight_concentration_prior=None, mean_precision_prior=None, mean_prior=None, degrees_of_freedom_prior=None, covariance_prior=None, random_state=None, warm_start=False, verbose=0, verbose_interval=10)#
+

OnnxOperatorMixin for BayesianGaussianMixture

+
+ +
+
+
+
+

OnnxSklearnBayesianRidge#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnBayesianRidge(*, n_iter=300, tol=0.001, alpha_1=1e-06, alpha_2=1e-06, lambda_1=1e-06, lambda_2=1e-06, alpha_init=None, lambda_init=None, compute_score=False, fit_intercept=True, copy_X=True, verbose=False)#
+

OnnxOperatorMixin for BayesianRidge

+
+ +
+
+
+
+

OnnxSklearnBernoulliNB#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnBernoulliNB(*, alpha=1.0, force_alpha='warn', binarize=0.0, fit_prior=True, class_prior=None)#
+

OnnxOperatorMixin for BernoulliNB

+
+ +
+
+
+
+

OnnxSklearnBinarizer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnBinarizer(*, threshold=0.0, copy=True)#
+

OnnxOperatorMixin for Binarizer

+
+ +
+
+
+
+

OnnxSklearnCalibratedClassifierCV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnCalibratedClassifierCV(estimator=None, *, method='sigmoid', cv=None, n_jobs=None, ensemble=True, base_estimator='deprecated')#
+

OnnxOperatorMixin for CalibratedClassifierCV

+
+ +
+
+
+
+

OnnxSklearnCategoricalNB#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnCategoricalNB(*, alpha=1.0, force_alpha='warn', fit_prior=True, class_prior=None, min_categories=None)#
+

OnnxOperatorMixin for CategoricalNB

+
+ +
+
+
+
+

OnnxSklearnColumnTransformer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnColumnTransformer(transformers, *, remainder='drop', sparse_threshold=0.3, n_jobs=None, transformer_weights=None, verbose=False, verbose_feature_names_out=True)[source]#
+

OnnxOperatorMixin for ColumnTransformer

+
+ +
+
+
+
+

OnnxSklearnComplementNB#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnComplementNB(*, alpha=1.0, force_alpha='warn', fit_prior=True, class_prior=None, norm=False)#
+

OnnxOperatorMixin for ComplementNB

+
+ +
+
+
+
+

OnnxSklearnCountVectorizer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnCountVectorizer(*, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern='(?u)\\b\\w\\w+\\b', ngram_range=(1, 1), analyzer='word', max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=<class 'numpy.int64'>)#
+

OnnxOperatorMixin for CountVectorizer

+
+ +
+
+
+
+

OnnxSklearnDecisionTreeClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnDecisionTreeClassifier(*, criterion='gini', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, class_weight=None, ccp_alpha=0.0)#
+

OnnxOperatorMixin for DecisionTreeClassifier

+
+ +
+
+
+
+

OnnxSklearnDecisionTreeRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnDecisionTreeRegressor(*, criterion='squared_error', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, ccp_alpha=0.0)#
+

OnnxOperatorMixin for DecisionTreeRegressor

+
+ +
+
+
+
+

OnnxSklearnDictVectorizer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnDictVectorizer(*, dtype=<class 'numpy.float64'>, separator='=', sparse=True, sort=True)#
+

OnnxOperatorMixin for DictVectorizer

+
+ +
+
+
+
+

OnnxSklearnElasticNet#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnElasticNet(alpha=1.0, *, l1_ratio=0.5, fit_intercept=True, precompute=False, max_iter=1000, copy_X=True, tol=0.0001, warm_start=False, positive=False, random_state=None, selection='cyclic')#
+

OnnxOperatorMixin for ElasticNet

+
+ +
+
+
+
+

OnnxSklearnElasticNetCV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnElasticNetCV(*, l1_ratio=0.5, eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, precompute='auto', max_iter=1000, tol=0.0001, cv=None, copy_X=True, verbose=0, n_jobs=None, positive=False, random_state=None, selection='cyclic')#
+

OnnxOperatorMixin for ElasticNetCV

+
+ +
+
+
+
+

OnnxSklearnExtraTreeClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnExtraTreeClassifier(*, criterion='gini', splitter='random', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='sqrt', random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, class_weight=None, ccp_alpha=0.0)#
+

OnnxOperatorMixin for ExtraTreeClassifier

+
+ +
+
+
+
+

OnnxSklearnExtraTreeRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnExtraTreeRegressor(*, criterion='squared_error', splitter='random', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=1.0, random_state=None, min_impurity_decrease=0.0, max_leaf_nodes=None, ccp_alpha=0.0)#
+

OnnxOperatorMixin for ExtraTreeRegressor

+
+ +
+
+
+
+

OnnxSklearnExtraTreesClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnExtraTreesClassifier(n_estimators=100, *, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='sqrt', max_leaf_nodes=None, min_impurity_decrease=0.0, bootstrap=False, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, class_weight=None, ccp_alpha=0.0, max_samples=None)#
+

OnnxOperatorMixin for ExtraTreesClassifier

+
+ +
+
+
+
+

OnnxSklearnExtraTreesRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnExtraTreesRegressor(n_estimators=100, *, criterion='squared_error', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=1.0, max_leaf_nodes=None, min_impurity_decrease=0.0, bootstrap=False, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, ccp_alpha=0.0, max_samples=None)#
+

OnnxOperatorMixin for ExtraTreesRegressor

+
+ +
+
+
+
+

OnnxSklearnFeatureHasher#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureHasher(n_features=1048576, *, input_type='dict', dtype=<class 'numpy.float64'>, alternate_sign=True)#
+

OnnxOperatorMixin for FeatureHasher

+
+ +
+
+
+
+

OnnxSklearnFeatureUnion#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureUnion(transformer_list, *, n_jobs=None, transformer_weights=None, verbose=False)[source]#
+

OnnxOperatorMixin for FeatureUnion

+
+ +
+
+
+
+

OnnxSklearnFunctionTransformer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnFunctionTransformer(func=None, inverse_func=None, *, validate=False, accept_sparse=False, check_inverse=True, feature_names_out=None, kw_args=None, inv_kw_args=None)#
+

OnnxOperatorMixin for FunctionTransformer

+
+ +
+
+
+
+

OnnxSklearnGammaRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnGammaRegressor(*, alpha=1.0, fit_intercept=True, solver='lbfgs', max_iter=100, tol=0.0001, warm_start=False, verbose=0)#
+

OnnxOperatorMixin for GammaRegressor

+
+ +
+
+
+
+

OnnxSklearnGaussianMixture#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianMixture(n_components=1, *, covariance_type='full', tol=0.001, reg_covar=1e-06, max_iter=100, n_init=1, init_params='kmeans', weights_init=None, means_init=None, precisions_init=None, random_state=None, warm_start=False, verbose=0, verbose_interval=10)#
+

OnnxOperatorMixin for GaussianMixture

+
+ +
+
+
+
+

OnnxSklearnGaussianNB#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianNB(*, priors=None, var_smoothing=1e-09)#
+

OnnxOperatorMixin for GaussianNB

+
+ +
+
+
+
+

OnnxSklearnGaussianProcessClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianProcessClassifier(kernel=None, *, optimizer='fmin_l_bfgs_b', n_restarts_optimizer=0, max_iter_predict=100, warm_start=False, copy_X_train=True, random_state=None, multi_class='one_vs_rest', n_jobs=None)#
+

OnnxOperatorMixin for GaussianProcessClassifier

+
+ +
+
+
+
+

OnnxSklearnGaussianProcessRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianProcessRegressor(kernel=None, *, alpha=1e-10, optimizer='fmin_l_bfgs_b', n_restarts_optimizer=0, normalize_y=False, copy_X_train=True, random_state=None)#
+

OnnxOperatorMixin for GaussianProcessRegressor

+
+ +
+
+
+
+

OnnxSklearnGaussianRandomProjection#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnGaussianRandomProjection(n_components='auto', *, eps=0.1, compute_inverse_components=False, random_state=None)#
+

OnnxOperatorMixin for GaussianRandomProjection

+
+ +
+
+
+
+

OnnxSklearnGenericUnivariateSelect#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnGenericUnivariateSelect(score_func=<function f_classif>, *, mode='percentile', param=1e-05)#
+

OnnxOperatorMixin for GenericUnivariateSelect

+
+ +
+
+
+
+

OnnxSklearnGradientBoostingClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnGradientBoostingClassifier(*, loss='log_loss', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, min_impurity_decrease=0.0, init=None, random_state=None, max_features=None, verbose=0, max_leaf_nodes=None, warm_start=False, validation_fraction=0.1, n_iter_no_change=None, tol=0.0001, ccp_alpha=0.0)#
+

OnnxOperatorMixin for GradientBoostingClassifier

+
+ +
+
+
+
+

OnnxSklearnGradientBoostingRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnGradientBoostingRegressor(*, loss='squared_error', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, min_impurity_decrease=0.0, init=None, random_state=None, max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False, validation_fraction=0.1, n_iter_no_change=None, tol=0.0001, ccp_alpha=0.0)#
+

OnnxOperatorMixin for GradientBoostingRegressor

+
+ +
+
+
+
+

OnnxSklearnGridSearchCV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnGridSearchCV(estimator, param_grid, *, scoring=None, n_jobs=None, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score=nan, return_train_score=False)#
+

OnnxOperatorMixin for GridSearchCV

+
+ +
+
+
+
+

OnnxSklearnHistGradientBoostingClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnHistGradientBoostingClassifier(loss='log_loss', *, learning_rate=0.1, max_iter=100, max_leaf_nodes=31, max_depth=None, min_samples_leaf=20, l2_regularization=0.0, max_bins=255, categorical_features=None, monotonic_cst=None, interaction_cst=None, warm_start=False, early_stopping='auto', scoring='loss', validation_fraction=0.1, n_iter_no_change=10, tol=1e-07, verbose=0, random_state=None, class_weight=None)#
+

OnnxOperatorMixin for HistGradientBoostingClassifier

+
+ +
+
+
+
+

OnnxSklearnHistGradientBoostingRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnHistGradientBoostingRegressor(loss='squared_error', *, quantile=None, learning_rate=0.1, max_iter=100, max_leaf_nodes=31, max_depth=None, min_samples_leaf=20, l2_regularization=0.0, max_bins=255, categorical_features=None, monotonic_cst=None, interaction_cst=None, warm_start=False, early_stopping='auto', scoring='loss', validation_fraction=0.1, n_iter_no_change=10, tol=1e-07, verbose=0, random_state=None)#
+

OnnxOperatorMixin for HistGradientBoostingRegressor

+
+ +
+
+
+
+

OnnxSklearnHuberRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnHuberRegressor(*, epsilon=1.35, max_iter=100, alpha=0.0001, warm_start=False, fit_intercept=True, tol=1e-05)#
+

OnnxOperatorMixin for HuberRegressor

+
+ +
+
+
+
+

OnnxSklearnIncrementalPCA#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnIncrementalPCA(n_components=None, *, whiten=False, copy=True, batch_size=None)#
+

OnnxOperatorMixin for IncrementalPCA

+
+ +
+
+
+
+

OnnxSklearnIsolationForest#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnIsolationForest(*, n_estimators=100, max_samples='auto', contamination='auto', max_features=1.0, bootstrap=False, n_jobs=None, random_state=None, verbose=0, warm_start=False)#
+

OnnxOperatorMixin for IsolationForest

+
+ +
+
+
+
+

OnnxSklearnKBinsDiscretizer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnKBinsDiscretizer(n_bins=5, *, encode='onehot', strategy='quantile', dtype=None, subsample='warn', random_state=None)#
+

OnnxOperatorMixin for KBinsDiscretizer

+
+ +
+
+
+
+

OnnxSklearnKMeans#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnKMeans(n_clusters=8, *, init='k-means++', n_init='warn', max_iter=300, tol=0.0001, verbose=0, random_state=None, copy_x=True, algorithm='lloyd')#
+

OnnxOperatorMixin for KMeans

+
+ +
+
+
+
+

OnnxSklearnKNNImputer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnKNNImputer(*, missing_values=nan, n_neighbors=5, weights='uniform', metric='nan_euclidean', copy=True, add_indicator=False, keep_empty_features=False)#
+

OnnxOperatorMixin for KNNImputer

+
+ +
+
+
+
+

OnnxSklearnKNeighborsClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnKNeighborsClassifier(n_neighbors=5, *, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None)#
+

OnnxOperatorMixin for KNeighborsClassifier

+
+ +
+
+
+
+

OnnxSklearnKNeighborsRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnKNeighborsRegressor(n_neighbors=5, *, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None)#
+

OnnxOperatorMixin for KNeighborsRegressor

+
+ +
+
+
+
+

OnnxSklearnKNeighborsTransformer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnKNeighborsTransformer(*, mode='distance', n_neighbors=5, algorithm='auto', leaf_size=30, metric='minkowski', p=2, metric_params=None, n_jobs=None)#
+

OnnxOperatorMixin for KNeighborsTransformer

+
+ +
+
+
+
+

OnnxSklearnKernelCenterer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnKernelCenterer#
+

OnnxOperatorMixin for KernelCenterer

+
+ +
+
+
+
+

OnnxSklearnKernelPCA#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnKernelPCA(n_components=None, *, kernel='linear', gamma=None, degree=3, coef0=1, kernel_params=None, alpha=1.0, fit_inverse_transform=False, eigen_solver='auto', tol=0, max_iter=None, iterated_power='auto', remove_zero_eig=False, random_state=None, copy_X=True, n_jobs=None)#
+

OnnxOperatorMixin for KernelPCA

+
+ +
+
+
+
+

OnnxSklearnLGBMClassifier#

+
+
+
+
+

OnnxSklearnLGBMRegressor#

+
+
+
+
+

OnnxSklearnLabelBinarizer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLabelBinarizer(*, neg_label=0, pos_label=1, sparse_output=False)#
+

OnnxOperatorMixin for LabelBinarizer

+
+ +
+
+
+
+

OnnxSklearnLabelEncoder#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLabelEncoder#
+

OnnxOperatorMixin for LabelEncoder

+
+ +
+
+
+
+

OnnxSklearnLars#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLars(*, fit_intercept=True, verbose=False, normalize='deprecated', precompute='auto', n_nonzero_coefs=500, eps=2.220446049250313e-16, copy_X=True, fit_path=True, jitter=None, random_state=None)#
+

OnnxOperatorMixin for Lars

+
+ +
+
+
+
+

OnnxSklearnLarsCV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLarsCV(*, fit_intercept=True, verbose=False, max_iter=500, normalize='deprecated', precompute='auto', cv=None, max_n_alphas=1000, n_jobs=None, eps=2.220446049250313e-16, copy_X=True)#
+

OnnxOperatorMixin for LarsCV

+
+ +
+
+
+
+

OnnxSklearnLasso#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLasso(alpha=1.0, *, fit_intercept=True, precompute=False, copy_X=True, max_iter=1000, tol=0.0001, warm_start=False, positive=False, random_state=None, selection='cyclic')#
+

OnnxOperatorMixin for Lasso

+
+ +
+
+
+
+

OnnxSklearnLassoCV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLassoCV(*, eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, precompute='auto', max_iter=1000, tol=0.0001, copy_X=True, cv=None, verbose=False, n_jobs=None, positive=False, random_state=None, selection='cyclic')#
+

OnnxOperatorMixin for LassoCV

+
+ +
+
+
+
+

OnnxSklearnLassoLars#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLassoLars(alpha=1.0, *, fit_intercept=True, verbose=False, normalize='deprecated', precompute='auto', max_iter=500, eps=2.220446049250313e-16, copy_X=True, fit_path=True, positive=False, jitter=None, random_state=None)#
+

OnnxOperatorMixin for LassoLars

+
+ +
+
+
+
+

OnnxSklearnLassoLarsCV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLassoLarsCV(*, fit_intercept=True, verbose=False, max_iter=500, normalize='deprecated', precompute='auto', cv=None, max_n_alphas=1000, n_jobs=None, eps=2.220446049250313e-16, copy_X=True, positive=False)#
+

OnnxOperatorMixin for LassoLarsCV

+
+ +
+
+
+
+

OnnxSklearnLassoLarsIC#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLassoLarsIC(criterion='aic', *, fit_intercept=True, verbose=False, normalize='deprecated', precompute='auto', max_iter=500, eps=2.220446049250313e-16, copy_X=True, positive=False, noise_variance=None)#
+

OnnxOperatorMixin for LassoLarsIC

+
+ +
+
+
+
+

OnnxSklearnLinearDiscriminantAnalysis#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLinearDiscriminantAnalysis(solver='svd', shrinkage=None, priors=None, n_components=None, store_covariance=False, tol=0.0001, covariance_estimator=None)#
+

OnnxOperatorMixin for LinearDiscriminantAnalysis

+
+ +
+
+
+
+

OnnxSklearnLinearRegression#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLinearRegression(*, fit_intercept=True, copy_X=True, n_jobs=None, positive=False)#
+

OnnxOperatorMixin for LinearRegression

+
+ +
+
+
+
+

OnnxSklearnLinearSVC#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLinearSVC(penalty='l2', loss='squared_hinge', *, dual=True, tol=0.0001, C=1.0, multi_class='ovr', fit_intercept=True, intercept_scaling=1, class_weight=None, verbose=0, random_state=None, max_iter=1000)#
+

OnnxOperatorMixin for LinearSVC

+
+ +
+
+
+
+

OnnxSklearnLinearSVR#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLinearSVR(*, epsilon=0.0, tol=0.0001, C=1.0, loss='epsilon_insensitive', fit_intercept=True, intercept_scaling=1.0, dual=True, verbose=0, random_state=None, max_iter=1000)#
+

OnnxOperatorMixin for LinearSVR

+
+ +
+
+
+
+

OnnxSklearnLocalOutlierFactor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLocalOutlierFactor(n_neighbors=20, *, algorithm='auto', leaf_size=30, metric='minkowski', p=2, metric_params=None, contamination='auto', novelty=False, n_jobs=None)#
+

OnnxOperatorMixin for LocalOutlierFactor

+
+ +
+
+
+
+

OnnxSklearnLogisticRegression#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLogisticRegression(penalty='l2', *, dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None, solver='lbfgs', max_iter=100, multi_class='auto', verbose=0, warm_start=False, n_jobs=None, l1_ratio=None)#
+

OnnxOperatorMixin for LogisticRegression

+
+ +
+
+
+
+

OnnxSklearnLogisticRegressionCV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnLogisticRegressionCV(*, Cs=10, fit_intercept=True, cv=None, dual=False, penalty='l2', scoring=None, solver='lbfgs', tol=0.0001, max_iter=100, class_weight=None, n_jobs=None, verbose=0, refit=True, intercept_scaling=1.0, multi_class='auto', random_state=None, l1_ratios=None)#
+

OnnxOperatorMixin for LogisticRegressionCV

+
+ +
+
+
+
+

OnnxSklearnMLPClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnMLPClassifier(hidden_layer_sizes=(100,), activation='relu', *, solver='adam', alpha=0.0001, batch_size='auto', learning_rate='constant', learning_rate_init=0.001, power_t=0.5, max_iter=200, shuffle=True, random_state=None, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True, early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08, n_iter_no_change=10, max_fun=15000)#
+

OnnxOperatorMixin for MLPClassifier

+
+ +
+
+
+
+

OnnxSklearnMLPRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnMLPRegressor(hidden_layer_sizes=(100,), activation='relu', *, solver='adam', alpha=0.0001, batch_size='auto', learning_rate='constant', learning_rate_init=0.001, power_t=0.5, max_iter=200, shuffle=True, random_state=None, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True, early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08, n_iter_no_change=10, max_fun=15000)#
+

OnnxOperatorMixin for MLPRegressor

+
+ +
+
+
+
+

OnnxSklearnMaxAbsScaler#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnMaxAbsScaler(*, copy=True)#
+

OnnxOperatorMixin for MaxAbsScaler

+
+ +
+
+
+
+

OnnxSklearnMinMaxScaler#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnMinMaxScaler(feature_range=(0, 1), *, copy=True, clip=False)#
+

OnnxOperatorMixin for MinMaxScaler

+
+ +
+
+
+
+

OnnxSklearnMiniBatchKMeans#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnMiniBatchKMeans(n_clusters=8, *, init='k-means++', max_iter=100, batch_size=1024, verbose=0, compute_labels=True, random_state=None, tol=0.0, max_no_improvement=10, init_size=None, n_init='warn', reassignment_ratio=0.01)#
+

OnnxOperatorMixin for MiniBatchKMeans

+
+ +
+
+
+
+

OnnxSklearnMultiOutputClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiOutputClassifier(estimator, *, n_jobs=None)#
+

OnnxOperatorMixin for MultiOutputClassifier

+
+ +
+
+
+
+

OnnxSklearnMultiOutputRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiOutputRegressor(estimator, *, n_jobs=None)#
+

OnnxOperatorMixin for MultiOutputRegressor

+
+ +
+
+
+
+

OnnxSklearnMultiTaskElasticNet#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiTaskElasticNet(alpha=1.0, *, l1_ratio=0.5, fit_intercept=True, copy_X=True, max_iter=1000, tol=0.0001, warm_start=False, random_state=None, selection='cyclic')#
+

OnnxOperatorMixin for MultiTaskElasticNet

+
+ +
+
+
+
+

OnnxSklearnMultiTaskElasticNetCV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiTaskElasticNetCV(*, l1_ratio=0.5, eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, max_iter=1000, tol=0.0001, cv=None, copy_X=True, verbose=0, n_jobs=None, random_state=None, selection='cyclic')#
+

OnnxOperatorMixin for MultiTaskElasticNetCV

+
+ +
+
+
+
+

OnnxSklearnMultiTaskLasso#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiTaskLasso(alpha=1.0, *, fit_intercept=True, copy_X=True, max_iter=1000, tol=0.0001, warm_start=False, random_state=None, selection='cyclic')#
+

OnnxOperatorMixin for MultiTaskLasso

+
+ +
+
+
+
+

OnnxSklearnMultiTaskLassoCV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultiTaskLassoCV(*, eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, max_iter=1000, tol=0.0001, copy_X=True, cv=None, verbose=False, n_jobs=None, random_state=None, selection='cyclic')#
+

OnnxOperatorMixin for MultiTaskLassoCV

+
+ +
+
+
+
+

OnnxSklearnMultinomialNB#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnMultinomialNB(*, alpha=1.0, force_alpha='warn', fit_prior=True, class_prior=None)#
+

OnnxOperatorMixin for MultinomialNB

+
+ +
+
+
+
+

OnnxSklearnNearestNeighbors#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnNearestNeighbors(*, n_neighbors=5, radius=1.0, algorithm='auto', leaf_size=30, metric='minkowski', p=2, metric_params=None, n_jobs=None)#
+

OnnxOperatorMixin for NearestNeighbors

+
+ +
+
+
+
+

OnnxSklearnNeighborhoodComponentsAnalysis#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnNeighborhoodComponentsAnalysis(n_components=None, *, init='auto', warm_start=False, max_iter=50, tol=1e-05, callback=None, verbose=0, random_state=None)#
+

OnnxOperatorMixin for NeighborhoodComponentsAnalysis

+
+ +
+
+
+
+

OnnxSklearnNormalizer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnNormalizer(norm='l2', *, copy=True)#
+

OnnxOperatorMixin for Normalizer

+
+ +
+
+
+
+

OnnxSklearnNuSVC#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnNuSVC(*, nu=0.5, kernel='rbf', degree=3, gamma='scale', coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, decision_function_shape='ovr', break_ties=False, random_state=None)#
+

OnnxOperatorMixin for NuSVC

+
+ +
+
+
+
+

OnnxSklearnNuSVR#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnNuSVR(*, nu=0.5, C=1.0, kernel='rbf', degree=3, gamma='scale', coef0=0.0, shrinking=True, tol=0.001, cache_size=200, verbose=False, max_iter=-1)#
+

OnnxOperatorMixin for NuSVR

+
+ +
+
+
+
+

OnnxSklearnOneClassSVM#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnOneClassSVM(*, kernel='rbf', degree=3, gamma='scale', coef0=0.0, tol=0.001, nu=0.5, shrinking=True, cache_size=200, verbose=False, max_iter=-1)#
+

OnnxOperatorMixin for OneClassSVM

+
+ +
+
+
+
+

OnnxSklearnOneHotEncoder#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnOneHotEncoder(*, categories='auto', drop=None, sparse='deprecated', sparse_output=True, dtype=<class 'numpy.float64'>, handle_unknown='error', min_frequency=None, max_categories=None)#
+

OnnxOperatorMixin for OneHotEncoder

+
+ +
+
+
+
+

OnnxSklearnOneVsOneClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnOneVsOneClassifier(estimator, *, n_jobs=None)#
+

OnnxOperatorMixin for OneVsOneClassifier

+
+ +
+
+
+
+

OnnxSklearnOneVsRestClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnOneVsRestClassifier(estimator, *, n_jobs=None, verbose=0)#
+

OnnxOperatorMixin for OneVsRestClassifier

+
+ +
+
+
+
+

OnnxSklearnOrdinalEncoder#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnOrdinalEncoder(*, categories='auto', dtype=<class 'numpy.float64'>, handle_unknown='error', unknown_value=None, encoded_missing_value=nan)#
+

OnnxOperatorMixin for OrdinalEncoder

+
+ +
+
+
+
+

OnnxSklearnOrthogonalMatchingPursuit#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnOrthogonalMatchingPursuit(*, n_nonzero_coefs=None, tol=None, fit_intercept=True, normalize='deprecated', precompute='auto')#
+

OnnxOperatorMixin for OrthogonalMatchingPursuit

+
+ +
+
+
+
+

OnnxSklearnOrthogonalMatchingPursuitCV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnOrthogonalMatchingPursuitCV(*, copy=True, fit_intercept=True, normalize='deprecated', max_iter=None, cv=None, n_jobs=None, verbose=False)#
+

OnnxOperatorMixin for OrthogonalMatchingPursuitCV

+
+ +
+
+
+
+

OnnxSklearnPCA#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnPCA(n_components=None, *, copy=True, whiten=False, svd_solver='auto', tol=0.0, iterated_power='auto', n_oversamples=10, power_iteration_normalizer='auto', random_state=None)#
+

OnnxOperatorMixin for PCA

+
+ +
+
+
+
+

OnnxSklearnPLSRegression#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnPLSRegression(n_components=2, *, scale=True, max_iter=500, tol=1e-06, copy=True)#
+

OnnxOperatorMixin for PLSRegression

+
+ +
+
+
+
+

OnnxSklearnPassiveAggressiveClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnPassiveAggressiveClassifier(*, C=1.0, fit_intercept=True, max_iter=1000, tol=0.001, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, shuffle=True, verbose=0, loss='hinge', n_jobs=None, random_state=None, warm_start=False, class_weight=None, average=False)#
+

OnnxOperatorMixin for PassiveAggressiveClassifier

+
+ +
+
+
+
+

OnnxSklearnPassiveAggressiveRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnPassiveAggressiveRegressor(*, C=1.0, fit_intercept=True, max_iter=1000, tol=0.001, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, shuffle=True, verbose=0, loss='epsilon_insensitive', epsilon=0.1, random_state=None, warm_start=False, average=False)#
+

OnnxOperatorMixin for PassiveAggressiveRegressor

+
+ +
+
+
+
+

OnnxSklearnPerceptron#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnPerceptron(*, penalty=None, alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=0.001, shuffle=True, verbose=0, eta0=1.0, n_jobs=None, random_state=0, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, class_weight=None, warm_start=False)#
+

OnnxOperatorMixin for Perceptron

+
+ +
+
+
+
+

OnnxSklearnPipeline#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnPipeline(steps, *, memory=None, verbose=False)[source]#
+

OnnxOperatorMixin for Pipeline

+
+ +
+
+
+
+

OnnxSklearnPoissonRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnPoissonRegressor(*, alpha=1.0, fit_intercept=True, solver='lbfgs', max_iter=100, tol=0.0001, warm_start=False, verbose=0)#
+

OnnxOperatorMixin for PoissonRegressor

+
+ +
+
+
+
+

OnnxSklearnPolynomialFeatures#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnPolynomialFeatures(degree=2, *, interaction_only=False, include_bias=True, order='C')#
+

OnnxOperatorMixin for PolynomialFeatures

+
+ +
+
+
+
+

OnnxSklearnPowerTransformer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnPowerTransformer(method='yeo-johnson', *, standardize=True, copy=True)#
+

OnnxOperatorMixin for PowerTransformer

+
+ +
+
+
+
+

OnnxSklearnQuadraticDiscriminantAnalysis#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnQuadraticDiscriminantAnalysis(*, priors=None, reg_param=0.0, store_covariance=False, tol=0.0001)#
+

OnnxOperatorMixin for QuadraticDiscriminantAnalysis

+
+ +
+
+
+
+

OnnxSklearnQuantileRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnQuantileRegressor(*, quantile=0.5, alpha=1.0, fit_intercept=True, solver='warn', solver_options=None)#
+

OnnxOperatorMixin for QuantileRegressor

+
+ +
+
+
+
+

OnnxSklearnRANSACRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRANSACRegressor(estimator=None, *, min_samples=None, residual_threshold=None, is_data_valid=None, is_model_valid=None, max_trials=100, max_skips=inf, stop_n_inliers=inf, stop_score=inf, stop_probability=0.99, loss='absolute_error', random_state=None, base_estimator='deprecated')#
+

OnnxOperatorMixin for RANSACRegressor

+
+ +
+
+
+
+

OnnxSklearnRFE#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRFE(estimator, *, n_features_to_select=None, step=1, verbose=0, importance_getter='auto')#
+

OnnxOperatorMixin for RFE

+
+ +
+
+
+
+

OnnxSklearnRFECV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRFECV(estimator, *, step=1, min_features_to_select=1, cv=None, scoring=None, verbose=0, n_jobs=None, importance_getter='auto')#
+

OnnxOperatorMixin for RFECV

+
+ +
+
+
+
+

OnnxSklearnRadiusNeighborsClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRadiusNeighborsClassifier(radius=1.0, *, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', outlier_label=None, metric_params=None, n_jobs=None)#
+

OnnxOperatorMixin for RadiusNeighborsClassifier

+
+ +
+
+
+
+

OnnxSklearnRadiusNeighborsRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRadiusNeighborsRegressor(radius=1.0, *, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None)#
+

OnnxOperatorMixin for RadiusNeighborsRegressor

+
+ +
+
+
+
+

OnnxSklearnRandomForestClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRandomForestClassifier(n_estimators=100, *, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='sqrt', max_leaf_nodes=None, min_impurity_decrease=0.0, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, class_weight=None, ccp_alpha=0.0, max_samples=None)#
+

OnnxOperatorMixin for RandomForestClassifier

+
+ +
+
+
+
+

OnnxSklearnRandomForestRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRandomForestRegressor(n_estimators=100, *, criterion='squared_error', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=1.0, max_leaf_nodes=None, min_impurity_decrease=0.0, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, ccp_alpha=0.0, max_samples=None)#
+

OnnxOperatorMixin for RandomForestRegressor

+
+ +
+
+
+
+

OnnxSklearnRandomTreesEmbedding#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRandomTreesEmbedding(n_estimators=100, *, max_depth=5, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_leaf_nodes=None, min_impurity_decrease=0.0, sparse_output=True, n_jobs=None, random_state=None, verbose=0, warm_start=False)#
+

OnnxOperatorMixin for RandomTreesEmbedding

+
+ +
+
+
+
+

OnnxSklearnRidge#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRidge(alpha=1.0, *, fit_intercept=True, copy_X=True, max_iter=None, tol=0.0001, solver='auto', positive=False, random_state=None)#
+

OnnxOperatorMixin for Ridge

+
+ +
+
+
+
+

OnnxSklearnRidgeCV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRidgeCV(alphas=(0.1, 1.0, 10.0), *, fit_intercept=True, scoring=None, cv=None, gcv_mode=None, store_cv_values=False, alpha_per_target=False)#
+

OnnxOperatorMixin for RidgeCV

+
+ +
+
+
+
+

OnnxSklearnRidgeClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRidgeClassifier(alpha=1.0, *, fit_intercept=True, copy_X=True, max_iter=None, tol=0.0001, class_weight=None, solver='auto', positive=False, random_state=None)#
+

OnnxOperatorMixin for RidgeClassifier

+
+ +
+
+
+
+

OnnxSklearnRidgeClassifierCV#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRidgeClassifierCV(alphas=(0.1, 1.0, 10.0), *, fit_intercept=True, scoring=None, cv=None, class_weight=None, store_cv_values=False)#
+

OnnxOperatorMixin for RidgeClassifierCV

+
+ +
+
+
+
+

OnnxSklearnRobustScaler#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnRobustScaler(*, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True, unit_variance=False)#
+

OnnxOperatorMixin for RobustScaler

+
+ +
+
+
+
+

OnnxSklearnSGDClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnSGDClassifier(loss='hinge', *, penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=0.001, shuffle=True, verbose=0, epsilon=0.1, n_jobs=None, random_state=None, learning_rate='optimal', eta0=0.0, power_t=0.5, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, class_weight=None, warm_start=False, average=False)#
+

OnnxOperatorMixin for SGDClassifier

+
+ +
+
+
+
+

OnnxSklearnSGDOneClassSVM#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnSGDOneClassSVM(nu=0.5, fit_intercept=True, max_iter=1000, tol=0.001, shuffle=True, verbose=0, random_state=None, learning_rate='optimal', eta0=0.0, power_t=0.5, warm_start=False, average=False)#
+

OnnxOperatorMixin for SGDOneClassSVM

+
+ +
+
+
+
+

OnnxSklearnSGDRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnSGDRegressor(loss='squared_error', *, penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=0.001, shuffle=True, verbose=0, epsilon=0.1, random_state=None, learning_rate='invscaling', eta0=0.01, power_t=0.25, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=False, average=False)#
+

OnnxOperatorMixin for SGDRegressor

+
+ +
+
+
+
+

OnnxSklearnSVC#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnSVC(*, C=1.0, kernel='rbf', degree=3, gamma='scale', coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, decision_function_shape='ovr', break_ties=False, random_state=None)#
+

OnnxOperatorMixin for SVC

+
+ +
+
+
+
+

OnnxSklearnSVR#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnSVR(*, kernel='rbf', degree=3, gamma='scale', coef0=0.0, tol=0.001, C=1.0, epsilon=0.1, shrinking=True, cache_size=200, verbose=False, max_iter=-1)#
+

OnnxOperatorMixin for SVR

+
+ +
+
+
+
+

OnnxSklearnSelectFdr#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectFdr(score_func=<function f_classif>, *, alpha=0.05)#
+

OnnxOperatorMixin for SelectFdr

+
+ +
+
+
+
+

OnnxSklearnSelectFpr#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectFpr(score_func=<function f_classif>, *, alpha=0.05)#
+

OnnxOperatorMixin for SelectFpr

+
+ +
+
+
+
+

OnnxSklearnSelectFromModel#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectFromModel(estimator, *, threshold=None, prefit=False, norm_order=1, max_features=None, importance_getter='auto')#
+

OnnxOperatorMixin for SelectFromModel

+
+ +
+
+
+
+

OnnxSklearnSelectFwe#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectFwe(score_func=<function f_classif>, *, alpha=0.05)#
+

OnnxOperatorMixin for SelectFwe

+
+ +
+
+
+
+

OnnxSklearnSelectKBest#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectKBest(score_func=<function f_classif>, *, k=10)#
+

OnnxOperatorMixin for SelectKBest

+
+ +
+
+
+
+

OnnxSklearnSelectPercentile#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnSelectPercentile(score_func=<function f_classif>, *, percentile=10)#
+

OnnxOperatorMixin for SelectPercentile

+
+ +
+
+
+
+

OnnxSklearnSimpleImputer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnSimpleImputer(*, missing_values=nan, strategy='mean', fill_value=None, verbose='deprecated', copy=True, add_indicator=False, keep_empty_features=False)#
+

OnnxOperatorMixin for SimpleImputer

+
+ +
+
+
+
+

OnnxSklearnStackingClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnStackingClassifier(estimators, final_estimator=None, *, cv=None, stack_method='auto', n_jobs=None, passthrough=False, verbose=0)#
+

OnnxOperatorMixin for StackingClassifier

+
+ +
+
+
+
+

OnnxSklearnStackingRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnStackingRegressor(estimators, final_estimator=None, *, cv=None, n_jobs=None, passthrough=False, verbose=0)#
+

OnnxOperatorMixin for StackingRegressor

+
+ +
+
+
+
+

OnnxSklearnStandardScaler#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnStandardScaler(*, copy=True, with_mean=True, with_std=True)#
+

OnnxOperatorMixin for StandardScaler

+
+ +
+
+
+
+

OnnxSklearnTfidfTransformer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnTfidfTransformer(*, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False)#
+

OnnxOperatorMixin for TfidfTransformer

+
+ +
+
+
+
+

OnnxSklearnTfidfVectorizer#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnTfidfVectorizer(*, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, analyzer='word', stop_words=None, token_pattern='(?u)\\b\\w\\w+\\b', ngram_range=(1, 1), max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=<class 'numpy.float64'>, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False)#
+

OnnxOperatorMixin for TfidfVectorizer

+
+ +
+
+
+
+

OnnxSklearnTheilSenRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnTheilSenRegressor(*, fit_intercept=True, copy_X=True, max_subpopulation=10000.0, n_subsamples=None, max_iter=300, tol=0.001, random_state=None, n_jobs=None, verbose=False)#
+

OnnxOperatorMixin for TheilSenRegressor

+
+ +
+
+
+
+

OnnxSklearnTraceableCountVectorizer#

+
+
+
+
+

OnnxSklearnTraceableTfidfVectorizer#

+
+
+
+
+

OnnxSklearnTransformedTargetRegressor#

+
+
+
+
+

OnnxSklearnTruncatedSVD#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnTruncatedSVD(n_components=2, *, algorithm='randomized', n_iter=5, n_oversamples=10, power_iteration_normalizer='auto', random_state=None, tol=0.0)#
+

OnnxOperatorMixin for TruncatedSVD

+
+ +
+
+
+
+

OnnxSklearnTweedieRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnTweedieRegressor(*, power=0.0, alpha=1.0, fit_intercept=True, link='auto', solver='lbfgs', max_iter=100, tol=0.0001, warm_start=False, verbose=0)#
+

OnnxOperatorMixin for TweedieRegressor

+
+ +
+
+
+
+

OnnxSklearnVarianceThreshold#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnVarianceThreshold(threshold=0.0)#
+

OnnxOperatorMixin for VarianceThreshold

+
+ +
+
+
+
+

OnnxSklearnVotingClassifier#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnVotingClassifier(estimators, *, voting='hard', weights=None, n_jobs=None, flatten_transform=True, verbose=False)#
+

OnnxOperatorMixin for VotingClassifier

+
+ +
+
+
+
+

OnnxSklearnVotingRegressor#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnVotingRegressor(estimators, *, weights=None, n_jobs=None, verbose=False)#
+

OnnxOperatorMixin for VotingRegressor

+
+ +
+
+
+
+

OnnxSklearnXGBClassifier#

+
+
+
+
+

OnnxSklearnXGBRegressor#

+
+
+
+
+

OnnxTransferTransformer#

+
+
+
+
+

OnnxValidatorClassifier#

+
+
+
+
+

OnnxWOEEncoder#

+
+
+
+
+

OnnxWOETransformer#

+
+
+
+
+

OnnxWrappedLightGbmBooster#

+
+
+
+
+

OnnxWrappedLightGbmBoosterClassifier#

+
+
+
+
+
+

Pipeline#

+
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnPipeline(steps, *, memory=None, verbose=False)[source]#
+

OnnxOperatorMixin for Pipeline

+
+
+onnx_converter()#
+

Returns a converter for this model. If not overloaded, it fetches the converter mapped to the first scikit-learn parent it can find.

+
+ +
+
+onnx_parser()#
+

Returns a parser for this model. If not overloaded, it calls the converter to guess the number of outputs. If it still fails, it fetches the parser mapped to the first scikit-learn parent it can find.

+
+ +
+
+onnx_shape_calculator()#
+

Returns a shape calculator for this model. If not overloaded, it fetches the shape calculator mapped to the first scikit-learn parent it can find.

+
+ +
+
+to_onnx(X=None, name=None, options=None, white_op=None, black_op=None, final_types=None, target_opset=None, verbose=0)#
+

Converts the model into ONNX format. It calls the method _to_onnx, which must be overloaded.

+
+
Parameters:

  • X – training data, at least one sample; it is used to guess the type of the input data.
  • name – name of the model; if None, it is replaced by the class name.
  • options – specific options given to converters (see Converters with options).
  • white_op – white list of ONNX nodes allowed while converting a pipeline; if empty, all are allowed.
  • black_op – black list of ONNX nodes disallowed while converting a pipeline; if empty, none are blacklisted.
  • final_types – a python list. It works the same way as initial_types but is not mandatory; it is used to overwrite the type (if the type is not None) and the name of every output.
  • target_opset – overwrites self.op_version.
  • verbose – displays information while converting.
+
+
+
+ +
+
+to_onnx_operator(inputs=None, outputs=None, target_opset=None, options=None)#
+

This function must be overloaded.

+
+ +
+ +
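The snippet below is a minimal usage sketch, not an excerpt from the library: it fits a small scikit-learn Pipeline and converts it with the generic entry point skl2onnx.to_onnx, which takes the same arguments (X, options, target_opset, …) as the to_onnx method documented above. The toy data and the file name pipeline.onnx are arbitrary.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from skl2onnx import to_onnx

# Toy training set; X is also what to_onnx uses to guess the input type.
X = np.random.randn(20, 4).astype(np.float32)
y = (X[:, 0] > 0).astype(np.int64)

pipe = Pipeline([("scale", StandardScaler()),
                 ("clf", LogisticRegression())])
pipe.fit(X, y)

onx = to_onnx(pipe, X)
with open("pipeline.onnx", "wb") as f:
    f.write(onx.SerializeToString())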
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnColumnTransformer(transformers, *, remainder='drop', sparse_threshold=0.3, n_jobs=None, transformer_weights=None, verbose=False, verbose_feature_names_out=True)[source]#
+

OnnxOperatorMixin for ColumnTransformer

+
+
+onnx_converter()#
+

Returns a converter for this model. If not overloaded, it fetches the converter mapped to the first scikit-learn parent it can find.

+
+ +
+
+onnx_parser()#
+

Returns a parser for this model. If not overloaded, it calls the converter to guess the number of outputs. If it still fails, it fetches the parser mapped to the first scikit-learn parent it can find.

+
+ +
+
+onnx_shape_calculator()#
+

Returns a shape calculator for this model. If not overloaded, it fetches the shape calculator mapped to the first scikit-learn parent it can find.

+
+ +
+
+to_onnx(X=None, name=None, options=None, white_op=None, black_op=None, final_types=None, target_opset=None, verbose=0)#
+

Converts the model into ONNX format. It calls the method _to_onnx, which must be overloaded.

+
+
Parameters:

  • X – training data, at least one sample; it is used to guess the type of the input data.
  • name – name of the model; if None, it is replaced by the class name.
  • options – specific options given to converters (see Converters with options).
  • white_op – white list of ONNX nodes allowed while converting a pipeline; if empty, all are allowed.
  • black_op – black list of ONNX nodes disallowed while converting a pipeline; if empty, none are blacklisted.
  • final_types – a python list. It works the same way as initial_types but is not mandatory; it is used to overwrite the type (if the type is not None) and the name of every output.
  • target_opset – overwrites self.op_version.
  • verbose – displays information while converting.
+
+
+
+ +
+
+to_onnx_operator(inputs=None, outputs=None, target_opset=None, options=None)#
+

This function must be overloaded.

+
+ +
+ +
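A ColumnTransformer converts the same way when it sits inside a pipeline. The sketch below assumes every column is numeric, so the training matrix X alone is enough for to_onnx to infer a single float input; mixed dtypes would need explicit initial types instead.

import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from skl2onnx import to_onnx

X = np.random.rand(50, 3).astype(np.float32)
y = X.sum(axis=1)

# Scale the first two columns one way, the last column another way.
prep = ColumnTransformer([("std", StandardScaler(), [0, 1]),
                          ("minmax", MinMaxScaler(), [2])])
model = Pipeline([("prep", prep), ("reg", LinearRegression())]).fit(X, y)

onx = to_onnx(model, X)
print([o.name for o in onx.graph.output])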
+
+class skl2onnx.algebra.sklearn_ops.OnnxSklearnFeatureUnion(transformer_list, *, n_jobs=None, transformer_weights=None, verbose=False)[source]#
+

OnnxOperatorMixin for FeatureUnion

+
+
+onnx_converter()#
+

Returns a converter for this model. If not overloaded, it fetches the converter mapped to the first scikit-learn parent it can find.

+
+ +
+
+onnx_parser()#
+

Returns a parser for this model. If not overloaded, it calls the converter to guess the number of outputs. If it still fails, it fetches the parser mapped to the first scikit-learn parent it can find.

+
+ +
+
+onnx_shape_calculator()#
+

Returns a shape calculator for this model. If not overloaded, it fetches the shape calculator mapped to the first scikit-learn parent it can find.

+
+ +
+
+to_onnx(X=None, name=None, options=None, white_op=None, black_op=None, final_types=None, target_opset=None, verbose=0)#
+

Converts the model into ONNX format. It calls the method _to_onnx, which must be overloaded.

+
+
Parameters:

  • X – training data, at least one sample; it is used to guess the type of the input data.
  • name – name of the model; if None, it is replaced by the class name.
  • options – specific options given to converters (see Converters with options).
  • white_op – white list of ONNX nodes allowed while converting a pipeline; if empty, all are allowed.
  • black_op – black list of ONNX nodes disallowed while converting a pipeline; if empty, none are blacklisted.
  • final_types – a python list. It works the same way as initial_types but is not mandatory; it is used to overwrite the type (if the type is not None) and the name of every output.
  • target_opset – overwrites self.op_version.
  • verbose – displays information while converting.
+
+
+
+ +
+
+to_onnx_operator(inputs=None, outputs=None, target_opset=None, options=None)#
+

This function must be overloaded.

+
+ +
+ +
+
+

Available ONNX operators#

+

skl2onnx maps every ONNX operator to a class that is easy to insert into a graph. These operators are added dynamically, so the list depends on the installed ONNX package. The documentation for these operators can be found on github: ONNX Operators.md and ONNX-ML Operators. Combined with onnxruntime, this mapping makes it easy to check the output of the ONNX operators on any data, as shown in the example Play with ONNX operators and in the sketch below.
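The lines below are a short sketch in the spirit of that example: two operator classes are chained into a graph computing (X + cst) * X and the result is checked with onnxruntime. The opset value 15 is only an assumption; any opset supported by the installed onnx package works.

import numpy as np
import onnxruntime as rt
from skl2onnx.algebra.onnx_ops import OnnxAdd, OnnxMul

op_version = 15  # assumed opset, adjust to the installed onnx package
cst = np.array([[1.0, 2.0]], dtype=np.float32)

# Graph computing Y = (X + cst) * X.
node = OnnxMul(OnnxAdd('X', cst, op_version=op_version), 'X',
               op_version=op_version, output_names=['Y'])

X = np.array([[4.0, 5.0]], dtype=np.float32)
onx = node.to_onnx({'X': X}, target_opset=op_version)

sess = rt.InferenceSession(onx.SerializeToString(),
                           providers=["CPUExecutionProvider"])
print(sess.run(None, {'X': X})[0])   # [[20. 35.]]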

+
+
+

OnnxAbs

OnnxGreaterOrEqual_16

OnnxReduceL2_1

OnnxAbs_1

OnnxGreater_1

OnnxReduceL2_11

OnnxAbs_13

OnnxGreater_13

OnnxReduceL2_13

OnnxAbs_6

OnnxGreater_7

OnnxReduceL2_18

OnnxAcos

OnnxGreater_9

OnnxReduceLogSum

OnnxAcos_7

OnnxGridSample

OnnxReduceLogSumExp

OnnxAcosh

OnnxGridSample_16

OnnxReduceLogSumExp_1

OnnxAcosh_9

OnnxGroupNormalization

OnnxReduceLogSumExp_11

OnnxAdagrad

OnnxGroupNormalization_18

OnnxReduceLogSumExp_13

OnnxAdagrad_1

OnnxHammingWindow

OnnxReduceLogSumExp_18

OnnxAdam

OnnxHammingWindow_17

OnnxReduceLogSum_1

OnnxAdam_1

OnnxHannWindow

OnnxReduceLogSum_11

OnnxAdd

OnnxHannWindow_17

OnnxReduceLogSum_13

OnnxAdd_1

OnnxHardSigmoid

OnnxReduceLogSum_18

OnnxAdd_13

OnnxHardSigmoid_1

OnnxReduceMax

OnnxAdd_14

OnnxHardSigmoid_6

OnnxReduceMax_1

OnnxAdd_6

OnnxHardSwish

OnnxReduceMax_11

OnnxAdd_7

OnnxHardSwish_14

OnnxReduceMax_12

OnnxAnd

OnnxHardmax

OnnxReduceMax_13

OnnxAnd_1

OnnxHardmax_1

OnnxReduceMax_18

OnnxAnd_7

OnnxHardmax_11

OnnxReduceMean

OnnxArgMax

OnnxHardmax_13

OnnxReduceMean_1

OnnxArgMax_1

OnnxIdentity

OnnxReduceMean_11

OnnxArgMax_11

OnnxIdentity_1

OnnxReduceMean_13

OnnxArgMax_12

OnnxIdentity_13

OnnxReduceMean_18

OnnxArgMax_13

OnnxIdentity_14

OnnxReduceMin

OnnxArgMin

OnnxIdentity_16

OnnxReduceMin_1

OnnxArgMin_1

OnnxIf

OnnxReduceMin_11

OnnxArgMin_11

OnnxIf_1

OnnxReduceMin_12

OnnxArgMin_12

OnnxIf_11

OnnxReduceMin_13

OnnxArgMin_13

OnnxIf_13

OnnxReduceMin_18

OnnxArrayFeatureExtractor

OnnxIf_16

OnnxReduceProd

OnnxArrayFeatureExtractor_1

OnnxImputer

OnnxReduceProd_1

OnnxAsin

OnnxImputer_1

OnnxReduceProd_11

OnnxAsin_7

OnnxInstanceNormalization

OnnxReduceProd_13

OnnxAsinh

OnnxInstanceNormalization_1

OnnxReduceProd_18

OnnxAsinh_9

OnnxInstanceNormalization_6

OnnxReduceSum

OnnxAtan

OnnxIsInf

OnnxReduceSumSquare

OnnxAtan_7

OnnxIsInf_10

OnnxReduceSumSquare_1

OnnxAtanh

OnnxIsNaN

OnnxReduceSumSquare_11

OnnxAtanh_9

OnnxIsNaN_13

OnnxReduceSumSquare_13

OnnxAveragePool

OnnxIsNaN_9

OnnxReduceSumSquare_18

OnnxAveragePool_1

OnnxLRN

OnnxReduceSum_1

OnnxAveragePool_10

OnnxLRN_1

OnnxReduceSum_11

OnnxAveragePool_11

OnnxLRN_13

OnnxReduceSum_13

OnnxAveragePool_19

OnnxLSTM

OnnxRelu

OnnxAveragePool_7

OnnxLSTM_1

OnnxRelu_1

OnnxBatchNormalization

OnnxLSTM_14

OnnxRelu_13

OnnxBatchNormalization_1

OnnxLSTM_7

OnnxRelu_14

OnnxBatchNormalization_14

OnnxLabelEncoder

OnnxRelu_6

OnnxBatchNormalization_15

OnnxLabelEncoder_1

OnnxReshape

OnnxBatchNormalization_6

OnnxLabelEncoder_2

OnnxReshape_1

OnnxBatchNormalization_7

OnnxLayerNormalization

OnnxReshape_13

OnnxBatchNormalization_9

OnnxLayerNormalization_17

OnnxReshape_14

OnnxBernoulli

OnnxLeakyRelu

OnnxReshape_5

OnnxBernoulli_15

OnnxLeakyRelu_1

OnnxResize

OnnxBinarizer

OnnxLeakyRelu_16

OnnxResize_10

OnnxBinarizer_1

OnnxLeakyRelu_6

OnnxResize_11

OnnxBitShift

OnnxLess

OnnxResize_13

OnnxBitShift_11

OnnxLessOrEqual

OnnxResize_18

OnnxBitwiseAnd

OnnxLessOrEqual_12

OnnxResize_19

OnnxBitwiseAnd_18

OnnxLessOrEqual_16

OnnxReverseSequence

OnnxBitwiseNot

OnnxLess_1

OnnxReverseSequence_10

OnnxBitwiseNot_18

OnnxLess_13

OnnxRoiAlign

OnnxBitwiseOr

OnnxLess_7

OnnxRoiAlign_10

OnnxBitwiseOr_18

OnnxLess_9

OnnxRoiAlign_16

OnnxBitwiseXor

OnnxLinearClassifier

OnnxRound

OnnxBitwiseXor_18

OnnxLinearClassifier_1

OnnxRound_11

OnnxBlackmanWindow

OnnxLinearRegressor

OnnxSTFT

OnnxBlackmanWindow_17

OnnxLinearRegressor_1

OnnxSTFT_17

OnnxCast

OnnxLog

OnnxSVMClassifier

OnnxCastLike

OnnxLogSoftmax

OnnxSVMClassifier_1

OnnxCastLike_15

OnnxLogSoftmax_1

OnnxSVMRegressor

OnnxCastMap

OnnxLogSoftmax_11

OnnxSVMRegressor_1

OnnxCastMap_1

OnnxLogSoftmax_13

OnnxScaler

OnnxCast_1

OnnxLog_1

OnnxScaler_1

OnnxCast_13

OnnxLog_13

OnnxScan

OnnxCast_6

OnnxLog_6

OnnxScan_11

OnnxCast_9

OnnxLoop

OnnxScan_16

OnnxCategoryMapper

OnnxLoop_1

OnnxScan_8

OnnxCategoryMapper_1

OnnxLoop_11

OnnxScan_9

OnnxCeil

OnnxLoop_13

OnnxScatter

OnnxCeil_1

OnnxLoop_16

OnnxScatterElements

OnnxCeil_13

OnnxLpNormalization

OnnxScatterElements_11

OnnxCeil_6

OnnxLpNormalization_1

OnnxScatterElements_13

OnnxCelu

OnnxLpPool

OnnxScatterElements_16

OnnxCelu_12

OnnxLpPool_1

OnnxScatterElements_18

OnnxCenterCropPad

OnnxLpPool_11

OnnxScatterND

OnnxCenterCropPad_18

OnnxLpPool_18

OnnxScatterND_11

OnnxClip

OnnxLpPool_2

OnnxScatterND_13

OnnxClip_1

OnnxMatMul

OnnxScatterND_16

OnnxClip_11

OnnxMatMulInteger

OnnxScatterND_18

OnnxClip_12

OnnxMatMulInteger_10

OnnxScatter_11

OnnxClip_13

OnnxMatMul_1

OnnxScatter_9

OnnxClip_6

OnnxMatMul_13

OnnxSelu

OnnxCol2Im

OnnxMatMul_9

OnnxSelu_1

OnnxCol2Im_18

OnnxMax

OnnxSelu_6

OnnxCompress

OnnxMaxPool

OnnxSequenceAt

OnnxCompress_11

OnnxMaxPool_1

OnnxSequenceAt_11

OnnxCompress_9

OnnxMaxPool_10

OnnxSequenceConstruct

OnnxConcat

OnnxMaxPool_11

OnnxSequenceConstruct_11

OnnxConcatFromSequence

OnnxMaxPool_12

OnnxSequenceEmpty

OnnxConcatFromSequence_11

OnnxMaxPool_8

OnnxSequenceEmpty_11

OnnxConcat_1

OnnxMaxRoiPool

OnnxSequenceErase

OnnxConcat_11

OnnxMaxRoiPool_1

OnnxSequenceErase_11

OnnxConcat_13

OnnxMaxUnpool

OnnxSequenceInsert

OnnxConcat_4

OnnxMaxUnpool_11

OnnxSequenceInsert_11

OnnxConstant

OnnxMaxUnpool_9

OnnxSequenceLength

OnnxConstantOfShape

OnnxMax_1

OnnxSequenceLength_11

OnnxConstantOfShape_9

OnnxMax_12

OnnxSequenceMap

OnnxConstant_1

OnnxMax_13

OnnxSequenceMap_17

OnnxConstant_11

OnnxMax_6

OnnxShape

OnnxConstant_12

OnnxMax_8

OnnxShape_1

OnnxConstant_13

OnnxMean

OnnxShape_13

OnnxConstant_9

OnnxMeanVarianceNormalization

OnnxShape_15

OnnxConv

OnnxMeanVarianceNormalization_13

OnnxShrink

OnnxConvInteger

OnnxMeanVarianceNormalization_9

OnnxShrink_9

OnnxConvInteger_10

OnnxMean_1

OnnxSigmoid

OnnxConvTranspose

OnnxMean_13

OnnxSigmoid_1

OnnxConvTranspose_1

OnnxMean_6

OnnxSigmoid_13

OnnxConvTranspose_11

OnnxMean_8

OnnxSigmoid_6

OnnxConv_1

OnnxMelWeightMatrix

OnnxSign

OnnxConv_11

OnnxMelWeightMatrix_17

OnnxSign_13

OnnxCos

OnnxMin

OnnxSign_9

OnnxCos_7

OnnxMin_1

OnnxSin

OnnxCosh

OnnxMin_12

OnnxSin_7

OnnxCosh_9

OnnxMin_13

OnnxSinh

OnnxCumSum

OnnxMin_6

OnnxSinh_9

OnnxCumSum_11

OnnxMin_8

OnnxSize

OnnxCumSum_14

OnnxMish

OnnxSize_1

OnnxDFT

OnnxMish_18

OnnxSize_13

OnnxDFT_17

OnnxMod

OnnxSlice

OnnxDepthToSpace

OnnxMod_10

OnnxSlice_1

OnnxDepthToSpace_1

OnnxMod_13

OnnxSlice_10

OnnxDepthToSpace_11

OnnxMomentum

OnnxSlice_11

OnnxDepthToSpace_13

OnnxMomentum_1

OnnxSlice_13

OnnxDequantizeLinear

OnnxMul

OnnxSoftmax

OnnxDequantizeLinear_10

OnnxMul_1

OnnxSoftmaxCrossEntropyLoss

OnnxDequantizeLinear_13

OnnxMul_13

OnnxSoftmaxCrossEntropyLoss_12

OnnxDet

OnnxMul_14

OnnxSoftmaxCrossEntropyLoss_13

OnnxDet_11

OnnxMul_6

OnnxSoftmax_1

OnnxDictVectorizer

OnnxMul_7

OnnxSoftmax_11

OnnxDictVectorizer_1

OnnxMultinomial

OnnxSoftmax_13

OnnxDiv

OnnxMultinomial_7

OnnxSoftplus

OnnxDiv_1

OnnxNeg

OnnxSoftplus_1

OnnxDiv_13

OnnxNeg_1

OnnxSoftsign

OnnxDiv_14

OnnxNeg_13

OnnxSoftsign_1

OnnxDiv_6

OnnxNeg_6

OnnxSpaceToDepth

OnnxDiv_7

OnnxNegativeLogLikelihoodLoss

OnnxSpaceToDepth_1

OnnxDropout

OnnxNegativeLogLikelihoodLoss_12

OnnxSpaceToDepth_13

OnnxDropout_1

OnnxNegativeLogLikelihoodLoss_13

OnnxSplit

OnnxDropout_10

OnnxNonMaxSuppression

OnnxSplitToSequence

OnnxDropout_12

OnnxNonMaxSuppression_10

OnnxSplitToSequence_11

OnnxDropout_13

OnnxNonMaxSuppression_11

OnnxSplit_1

OnnxDropout_6

OnnxNonZero

OnnxSplit_11

OnnxDropout_7

OnnxNonZero_13

OnnxSplit_13

OnnxDynamicQuantizeLinear

OnnxNonZero_9

OnnxSplit_18

OnnxDynamicQuantizeLinear_11

OnnxNormalizer

OnnxSplit_2

OnnxEinsum

OnnxNormalizer_1

OnnxSqrt

OnnxEinsum_12

OnnxNot

OnnxSqrt_1

OnnxElu

OnnxNot_1

OnnxSqrt_13

OnnxElu_1

OnnxOneHot

OnnxSqrt_6

OnnxElu_6

OnnxOneHotEncoder

OnnxSqueeze

OnnxEqual

OnnxOneHotEncoder_1

OnnxSqueeze_1

OnnxEqual_1

OnnxOneHot_11

OnnxSqueeze_11

OnnxEqual_11

OnnxOneHot_9

OnnxSqueeze_13

OnnxEqual_13

OnnxOptional

OnnxStringNormalizer

OnnxEqual_19

OnnxOptionalGetElement

OnnxStringNormalizer_10

OnnxEqual_7

OnnxOptionalGetElement_15

OnnxSub

OnnxErf

OnnxOptionalGetElement_18

OnnxSub_1

OnnxErf_13

OnnxOptionalHasElement

OnnxSub_13

OnnxErf_9

OnnxOptionalHasElement_15

OnnxSub_14

OnnxExp

OnnxOptionalHasElement_18

OnnxSub_6

OnnxExp_1

OnnxOptional_15

OnnxSub_7

OnnxExp_13

OnnxOr

OnnxSum

OnnxExp_6

OnnxOr_1

OnnxSum_1

OnnxExpand

OnnxOr_7

OnnxSum_13

OnnxExpand_13

OnnxPRelu

OnnxSum_6

OnnxExpand_8

OnnxPRelu_1

OnnxSum_8

OnnxEyeLike

OnnxPRelu_16

OnnxTan

OnnxEyeLike_9

OnnxPRelu_6

OnnxTan_7

OnnxFeatureVectorizer

OnnxPRelu_7

OnnxTanh

OnnxFeatureVectorizer_1

OnnxPRelu_9

OnnxTanh_1

OnnxFlatten

OnnxPad

OnnxTanh_13

OnnxFlatten_1

OnnxPad_1

OnnxTanh_6

OnnxFlatten_11

OnnxPad_11

OnnxTfIdfVectorizer

OnnxFlatten_13

OnnxPad_13

OnnxTfIdfVectorizer_9

OnnxFlatten_9

OnnxPad_18

OnnxThresholdedRelu

OnnxFloor

OnnxPad_19

OnnxThresholdedRelu_10

OnnxFloor_1

OnnxPad_2

OnnxTile

OnnxFloor_13

OnnxPow

OnnxTile_1

OnnxFloor_6

OnnxPow_1

OnnxTile_13

OnnxGRU

OnnxPow_12

OnnxTile_6

OnnxGRU_1

OnnxPow_13

OnnxTopK

OnnxGRU_14

OnnxPow_15

OnnxTopK_1

OnnxGRU_3

OnnxPow_7

OnnxTopK_10

OnnxGRU_7

OnnxQLinearConv

OnnxTopK_11

OnnxGather

OnnxQLinearConv_10

OnnxTranspose

OnnxGatherElements

OnnxQLinearMatMul

OnnxTranspose_1

OnnxGatherElements_11

OnnxQLinearMatMul_10

OnnxTranspose_13

OnnxGatherElements_13

OnnxQuantizeLinear

OnnxTreeEnsembleClassifier

OnnxGatherND

OnnxQuantizeLinear_10

OnnxTreeEnsembleClassifier_1

OnnxGatherND_11

OnnxQuantizeLinear_13

OnnxTreeEnsembleClassifier_3

OnnxGatherND_12

OnnxRNN

OnnxTreeEnsembleRegressor

OnnxGatherND_13

OnnxRNN_1

OnnxTreeEnsembleRegressor_1

OnnxGather_1

OnnxRNN_14

OnnxTreeEnsembleRegressor_3

OnnxGather_11

OnnxRNN_7

OnnxTrilu

OnnxGather_13

OnnxRandomNormal

OnnxTrilu_14

OnnxGemm

OnnxRandomNormalLike

OnnxUnique

OnnxGemm_1

OnnxRandomNormalLike_1

OnnxUnique_11

OnnxGemm_11

OnnxRandomNormal_1

OnnxUnsqueeze

OnnxGemm_13

OnnxRandomUniform

OnnxUnsqueeze_1

OnnxGemm_6

OnnxRandomUniformLike

OnnxUnsqueeze_11

OnnxGemm_7

OnnxRandomUniformLike_1

OnnxUnsqueeze_13

OnnxGemm_9

OnnxRandomUniform_1

OnnxUpsample

OnnxGlobalAveragePool

OnnxRange

OnnxUpsample_10

OnnxGlobalAveragePool_1

OnnxRange_11

OnnxUpsample_7

OnnxGlobalLpPool

OnnxReciprocal

OnnxUpsample_9

OnnxGlobalLpPool_1

OnnxReciprocal_1

OnnxWhere

OnnxGlobalLpPool_2

OnnxReciprocal_13

OnnxWhere_16

OnnxGlobalMaxPool

OnnxReciprocal_6

OnnxWhere_9

OnnxGlobalMaxPool_1

OnnxReduceL1

OnnxXor

OnnxGradient

OnnxReduceL1_1

OnnxXor_1

OnnxGradient_1

OnnxReduceL1_11

OnnxXor_7

OnnxGreater

OnnxReduceL1_13

OnnxZipMap

OnnxGreaterOrEqual

OnnxReduceL1_18

OnnxZipMap_1

OnnxGreaterOrEqual_12

OnnxReduceL2

+
+
+
+
+

OnnxAbs#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAbs(*args, **kwargs)#
+

Version

+

Onnx name: Abs

+

This version of the operator has been available since +version 13.

+

Summary

+

Absolute takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the absolute value, y = abs(x), is applied to the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
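As a quick, hedged illustration of the summary above (the input name X and opset 13 are arbitrary choices), a single Abs node can be wrapped and evaluated with onnxruntime:

import numpy as np
import onnxruntime as rt
from skl2onnx.algebra.onnx_ops import OnnxAbs

x = np.array([[-1.5, 0.0, 2.5]], dtype=np.float32)
node = OnnxAbs('X', op_version=13, output_names=['Y'])
onx = node.to_onnx({'X': x}, target_opset=13)

sess = rt.InferenceSession(onx.SerializeToString(),
                           providers=["CPUExecutionProvider"])
print(sess.run(None, {'X': x})[0])   # [[1.5 0.  2.5]]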
+
+
+
+

OnnxAbs_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAbs_1(*args, **kwargs)#
+

Version

+

Onnx name: Abs

+

This version of the operator has been available since +version 1.

+

Summary

+

Absolute takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the absolute value, y = abs(x), is applied to the tensor elementwise.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAbs_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAbs_13(*args, **kwargs)#
+

Version

+

Onnx name: Abs

+

This version of the operator has been available since +version 13.

+

Summary

+

Absolute takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the absolute value, y = abs(x), is applied to the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxAbs_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAbs_6(*args, **kwargs)#
+

Version

+

Onnx name: Abs

+

This version of the operator has been available since +version 6.

+

Summary

+

Absolute takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the absolute value, y = abs(x), is applied to the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxAcos#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAcos(*args, **kwargs)#
+

Version

+

Onnx name: Acos

+

This version of the operator has been available since +version 7.

+

Summary

+

Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The arccosine of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAcos_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAcos_7(*args, **kwargs)#
+

Version

+

Onnx name: Acos

+

This version of the operator has been available since +version 7.

+

Summary

+

Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The arccosine of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAcosh#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAcosh(*args, **kwargs)#
+

Version

+

Onnx name: Acosh

+

This version of the operator has been available since +version 9.

+

Summary

+

Calculates the hyperbolic arccosine of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic arccosine values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAcosh_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAcosh_9(*args, **kwargs)#
+

Version

+

Onnx name: Acosh

+

This version of the operator has been available since +version 9.

+

Summary

+

Calculates the hyperbolic arccosine of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic arccosine values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAdagrad#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAdagrad(*args, **kwargs)#
+

Version

+

Onnx name: Adagrad

+

This version of the operator has been available since +version 1 of domain ai.onnx.preview.training.

+

Summary

+

Compute one iteration of ADAGRAD, a stochastic gradient based optimization algorithm. This operator can conduct the optimization of multiple tensor variables.

+

Let’s define the behavior of this operator. As you can imagine, ADAGRAD requires some parameters:

+
+
    +
  • The initial learning-rate “R”.

  • +
  • The update count “T”. That is, the number of training iterations conducted.

  • +
  • A L2-norm regularization coefficient “norm_coefficient”.

  • +
  • A learning-rate decay factor “decay_factor”.

  • +
  • A small constant “epsilon” to avoid dividing-by-zero.

  • +
+
+

At each ADAGRAD iteration, the optimized tensors are moved along a direction computed based on their estimated gradient and accumulated squared gradient. Assume that only a single tensor “X” is updated by this operator. We need the value of “X”, its gradient “G”, and its accumulated squared gradient “H”. Therefore, variables in this operator’s input list are sequentially “R”, “T”, “X”, “G”, and “H”. Other parameters are given as attributes because they are usually constants. Also, the corresponding output tensors are the new value of “X” (called “X_new”), and then the new accumulated squared gradient (called “H_new”). Those outputs are computed from the given inputs following the pseudo code below.

+

Let “+”, “-”, “*”, and “/” be element-wise arithmetic operations with numpy-style broadcasting support. The pseudo code to compute those outputs is:

+
+

// Compute a scalar learning-rate factor. At the first update of X, T is generally
// 0 (0-based update index) or 1 (1-based update index).
r = R / (1 + T * decay_factor);

// Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm.
G_regularized = norm_coefficient * X + G;

// Compute new accumulated squared gradient.
H_new = H + G_regularized * G_regularized;

// Compute the adaptive part of per-coordinate learning rate. Note that Sqrt(...)
// computes element-wise square-root.
H_adaptive = Sqrt(H_new) + epsilon

// Compute the new value of “X”.
X_new = X - r * G_regularized / H_adaptive;

+
+

If one assigns this operator to optimize multiple inputs, for example “X_1” and “X_2”, the same pseudo code may be extended to handle all tensors jointly. More specifically, we can view “X” as a concatenation of “X_1” and “X_2” (of course, their gradients and accumulated gradients should be concatenated too) and then just reuse the entire pseudo code.

+

Note that ADAGRAD was first proposed in http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf. In that reference paper, this operator is a special case of Figure 1’s composite mirror descent update.
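The pseudo code translates almost directly into NumPy. The helper below is only an illustration of one ADAGRAD step for a single tensor, not the operator’s implementation; the argument names mirror the inputs and attributes listed next.

import numpy as np

def adagrad_step(R, T, X, G, H, decay_factor=0.0,
                 epsilon=1e-6, norm_coefficient=0.0):
    # Scalar learning-rate factor decayed by the update count T.
    r = R / (1 + T * decay_factor)
    # Gradient of the L2-regularized objective.
    G_regularized = norm_coefficient * X + G
    # New accumulated squared gradient.
    H_new = H + G_regularized * G_regularized
    # Per-coordinate adaptive denominator.
    H_adaptive = np.sqrt(H_new) + epsilon
    # Updated tensor.
    X_new = X - r * G_regularized / H_adaptive
    return X_new, H_new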

+

Attributes

+
    +
  • decay_factor: The decay factor of learning rate after one update.The effective learning rate is computed by r = R / (1 + T * decay_factor). Default to 0 so that increasing update counts doesn’t reduce the learning rate. Default value is +name: "decay_factor" f: 0.0 type: FLOAT

  • +
  • epsilon: Small scalar to avoid dividing by zero. Default value is +name: "epsilon" f: 9.999999974752427e-07 type: FLOAT

  • +
  • norm_coefficient: Regularization coefficient in 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization. Default value is +name: "norm_coefficient" f: 0.0 type: FLOAT

  • +
+

Inputs

+

Between 3 and 2147483647 inputs.

+
    +
  • R (heterogeneous)T1: The initial learning rate.

  • +
  • T (heterogeneous)T2: The update count of “X”. It should be a scalar.

  • +
  • inputs (variadic)T3: The current values of optimized tensors, followed by their respective gradients, followed by their respective accumulated squared gradients.For example, if two tensor “X_1” and “X_2” are optimized, The input list would be [“X_1”, “X_2”, gradient of “X_1”, gradient of “X_2”, accumulated squared gradient of “X_1”, accumulated squared gradient of “X_2”].

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic)T3: Updated values of optimized tensors, followed by their updated values of accumulated squared gradients. For example, if two tensor “X_1” and “X_2” are optimized, the output list would be [new value of “X_1,” new value of “X_2” new accumulated squared gradient of “X_1”, new accumulated squared gradient of “X_2”].

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(double): Constrain input types to float scalars.

  • +
  • T2 tensor(int64): Constrain input types to 64-bit integer scalars.

  • +
  • T3 tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAdagrad_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAdagrad_1(*args, **kwargs)#
+

Version

+

Onnx name: Adagrad

+

This version of the operator has been available since +version 1 of domain ai.onnx.preview.training.

+

Summary

+

Compute one iteration of ADAGRAD, a stochastic gradient based optimization +algorithm. This operator can conduct the optimization of multiple tensor variables.

+

Let’s define the behavior of this operator. As you can imagine, ADAGRAD requires +some parameters:

+
+
    +
  • The initial learning-rate “R”.

  • +
  • The update count “T”. That is, the number of training iterations conducted.

  • +
  • A L2-norm regularization coefficient “norm_coefficient”.

  • +
  • A learning-rate decay factor “decay_factor”.

  • +
  • A small constant “epsilon” to avoid dividing-by-zero.

  • +
+
+

At each ADAGRAD iteration, the optimized tensors are moved along a direction +computed based on their estimated gradient and accumulated squared gradient. Assume +that only a single tensor “X” is updated by this operator. We need the value of “X”, +its gradient “G”, and its accumulated squared gradient “H”. Therefore, variables in +this operator’s input list are sequentially “R”, “T”, “X”, “G”, and “H”. Other +parameters are given as attributes because they are usually constants. Also, the +corresponding output tensors are the new value of “X” (called “X_new”), and then +the new accumulated squared gradient (called “H_new”). Those outputs are computed +from the given inputs following the pseudo code below.

+

Let “+”, “-”, “*”, and “/” be element-wise arithmetic operations with numpy-style broadcasting support. The pseudo code to compute those outputs is:

+
+

// Compute a scalar learning-rate factor. At the first update of X, T is generally +// 0 (0-based update index) or 1 (1-based update index). +r = R / (1 + T * decay_factor);

+

// Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm. +G_regularized = norm_coefficient * X + G;

+

// Compute new accumulated squared gradient. +H_new = H + G_regularized * G_regularized;

+

// Compute the adaptive part of per-coordinate learning rate. Note that Sqrt(…) +// computes element-wise square-root. +H_adaptive = Sqrt(H_new) + epsilon

+

// Compute the new value of “X”. +X_new = X - r * G_regularized / H_adaptive;

+
+

If one assigns this operator to optimize multiple inputs, for example “X_1” and “X_2”, the same pseudo code may be extended to handle all tensors jointly. More specifically, we can view “X” as a concatenation of “X_1” and “X_2” (of course, their gradients and accumulated gradients should be concatenated too) and then just reuse the entire pseudo code.

+

Note that ADAGRAD was first proposed in http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf. +In that reference paper, this operator is a special case of the Figure 1’s composite mirror +descent update.

+

Attributes

+
    +
  • decay_factor: The decay factor of learning rate after one update.The effective learning rate is computed by r = R / (1 + T * decay_factor). Default to 0 so that increasing update counts doesn’t reduce the learning rate. Default value is +name: "decay_factor" f: 0.0 type: FLOAT

  • +
  • epsilon: Small scalar to avoid dividing by zero. Default value is +name: "epsilon" f: 9.999999974752427e-07 type: FLOAT

  • +
  • norm_coefficient: Regularization coefficient in 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization. Default value is +name: "norm_coefficient" f: 0.0 type: FLOAT

  • +
+

Inputs

+

Between 3 and 2147483647 inputs.

+
    +
  • R (heterogeneous)T1: The initial learning rate.

  • +
  • T (heterogeneous)T2: The update count of “X”. It should be a scalar.

  • +
  • inputs (variadic)T3: The current values of optimized tensors, followed by their respective gradients, followed by their respective accumulated squared gradients.For example, if two tensor “X_1” and “X_2” are optimized, The input list would be [“X_1”, “X_2”, gradient of “X_1”, gradient of “X_2”, accumulated squared gradient of “X_1”, accumulated squared gradient of “X_2”].

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic)T3: Updated values of optimized tensors, followed by their updated values of accumulated squared gradients. For example, if two tensor “X_1” and “X_2” are optimized, the output list would be [new value of “X_1,” new value of “X_2” new accumulated squared gradient of “X_1”, new accumulated squared gradient of “X_2”].

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(double): Constrain input types to float scalars.

  • +
  • T2 tensor(int64): Constrain input types to 64-bit integer scalars.

  • +
  • T3 tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAdam#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAdam(*args, **kwargs)#
+

Version

+

Onnx name: Adam

+

This version of the operator has been available since +version 1 of domain ai.onnx.preview.training.

+

Summary

+

Compute one iteration of Adam, a stochastic gradient based optimization algorithm. This operator can conduct the optimization of multiple tensor variables.

+

Let’s define the behavior of this operator. First of all, Adam requires some parameters:

+
+
    +
  • The learning-rate “R”.

  • +
  • The update count “T”. That is, the number of training iterations conducted.

  • +
  • A L2-norm regularization coefficient “norm_coefficient”.

  • +
  • A small constant “epsilon” to avoid dividing-by-zero.

  • +
  • Two coefficients, “alpha” and “beta”.

  • +
+
+

At each Adam iteration, the optimized tensors are moved along a direction computed based on their exponentially-averaged historical gradient and exponentially-averaged historical squared gradient. Assume that only a tensor “X” is being optimized. The rest of the required information is

+
+
    +
  • the value of “X”,

  • +
  • “X“‘s gradient (denoted by “G”),

  • +
  • “X“‘s exponentially-averaged historical gradient (denoted by “V”), and

  • +
  • “X“‘s exponentially-averaged historical squared gradient (denoted by “H”).

  • +
+
+

Some of those parameters are passed into this operator as input tensors and others are stored as this operator’s attributes. Specifically, this operator’s input tensor list is [“R”, “T”, “X”, “G”, “V”, “H”]. That is, “R” is the first input, “T” is the second input, and so on. Other parameters are given as attributes because they are constants. Moreover, the corresponding output tensors are

+
+
    +
  • the new value of “X” (called “X_new”),

  • +
  • the new exponentially-averaged historical gradient (denoted by “V_new”), and

  • +
  • the new exponentially-averaged historical squared gradient (denoted by “H_new”).

  • +
+
+

Those outputs are computed following the pseudo code below.

+

Let “+”, “-”, “*”, and “/” be element-wise arithmetic operations with numpy-style broadcasting support. The pseudo code to compute those outputs is:

+
+

// Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm.
G_regularized = norm_coefficient * X + G

// Update exponentially-averaged historical gradient.
V_new = alpha * V + (1 - alpha) * G_regularized

// Update exponentially-averaged historical squared gradient.
H_new = beta * H + (1 - beta) * G_regularized * G_regularized

// Compute the element-wise square-root of H_new. V_new will be element-wise
// divided by H_sqrt for a better update direction.
H_sqrt = Sqrt(H_new) + epsilon

// Compute learning-rate. Note that “alpha**T”/”beta**T” is alpha’s/beta’s T-th power.
R_adjusted = T > 0 ? R * Sqrt(1 - beta**T) / (1 - alpha**T) : R

// Compute new value of “X”.
X_new = X - R_adjusted * V_new / H_sqrt

// Post-update regularization.
X_final = (1 - norm_coefficient_post) * X_new

+
+

If there are multiple inputs to be optimized, the pseudo code will be applied independently to each of them.
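As for ADAGRAD, the update is easy to restate in NumPy for a single tensor. This is an illustrative sketch of one Adam step following the pseudo code above, not the operator’s implementation.

import numpy as np

def adam_step(R, T, X, G, V, H, alpha=0.9, beta=0.999, epsilon=1e-6,
              norm_coefficient=0.0, norm_coefficient_post=0.0):
    # Gradient of the L2-regularized objective.
    G_regularized = norm_coefficient * X + G
    # Exponentially-averaged gradient and squared gradient.
    V_new = alpha * V + (1 - alpha) * G_regularized
    H_new = beta * H + (1 - beta) * G_regularized * G_regularized
    H_sqrt = np.sqrt(H_new) + epsilon
    # Bias-corrected learning rate (see the pseudo code above).
    R_adjusted = R * np.sqrt(1 - beta ** T) / (1 - alpha ** T) if T > 0 else R
    X_new = X - R_adjusted * V_new / H_sqrt
    # Post-update regularization.
    X_final = (1 - norm_coefficient_post) * X_new
    return X_final, V_new, H_new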

+

Attributes

+
    +
  • alpha: Coefficient of previously accumulated gradient in running average. Default to 0.9. Default value is +name: "alpha" f: 0.8999999761581421 type: FLOAT

  • +
  • beta: Coefficient of previously accumulated squared-gradient in running average. Default to 0.999. Default value is +name: "beta" f: 0.9990000128746033 type: FLOAT

  • +
  • epsilon: Small scalar to avoid dividing by zero. Default value is +name: "epsilon" f: 9.999999974752427e-07 type: FLOAT

  • +
  • norm_coefficient: Regularization coefficient of 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization. Default value is +name: "norm_coefficient" f: 0.0 type: FLOAT

  • +
  • norm_coefficient_post: Regularization coefficient of 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization. Default value is +name: "norm_coefficient_post" f: 0.0 type: FLOAT

  • +
+

Inputs

+

Between 3 and 2147483647 inputs.

+
    +
  • R (heterogeneous)T1: The initial learning rate.

  • +
  • T (heterogeneous)T2: The update count of “X”. It should be a scalar.

  • +
  • inputs (variadic)T3: The tensors to be optimized, followed by their respective gradients, followed by their respective accumulated gradients (aka momentum), followed by their respective accumulated squared gradients. For example, to optimize tensors “X_1” and “X_2,”, the input list would be [“X_1”, “X_2”, gradient of “X_1”, gradient of “X_2”, accumulated gradient of “X_1”, accumulated gradient of “X_2”, accumulated squared gradient of “X_1”, accumulated squared gradient of “X_2”].

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic)T3: New values of optimized tensors, followed by their respective new accumulated gradients, followed by their respective new accumulated squared gradients. For example, if two tensors “X_1” and “X_2” are optimized, the outputs list would be [new value of “X_1”, new value of “X_2”, new accumulated gradient of “X_1”, new accumulated gradient of “X_2”, new accumulated squared gradient of “X_1”, new accumulated squared gradient of “X_2”].

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(double): Constrain input types to float scalars.

  • +
  • T2 tensor(int64): Constrain input types to 64-bit integer scalars.

  • +
  • T3 tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAdam_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAdam_1(*args, **kwargs)#
+

Version

+

Onnx name: Adam

+

This version of the operator has been available since +version 1 of domain ai.onnx.preview.training.

+

Summary

+

Compute one iteration of Adam, a stochastic gradient based optimization +algorithm. This operator can conduct the optimization of multiple tensor variables.

+

Let’s define the behavior of this operator. First of all, Adam requires +some parameters:

+
+
    +
  • The learning-rate “R”.

  • +
  • The update count “T”. That is, the number of training iterations conducted.

  • +
  • A L2-norm regularization coefficient “norm_coefficient”.

  • +
  • A small constant “epsilon” to avoid dividing-by-zero.

  • +
  • Two coefficients, “alpha” and “beta”.

  • +
+
+

At each Adam iteration, the optimized tensors are moved along a direction +computed based on their exponentially-averaged historical gradient and +exponentially-averaged historical squared gradient. Assume that only a tensor +“X” is being optimized. The rest of required information is

+
+
    +
  • the value of “X”,

  • +
  • “X“‘s gradient (denoted by “G”),

  • +
  • “X“‘s exponentially-averaged historical gradient (denoted by “V”), and

  • +
  • “X“‘s exponentially-averaged historical squared gradient (denoted by “H”).

  • +
+
+

Some of those parameters are passed into this operator as input tensors and others +are stored as this operator’s attributes. Specifically, this operator’s input tensor +list is [“R”, “T”, “X”, “G”, “V”, “H”]. That is, “R” is the first input, “T” is +the second input, and so on. Other parameters are given as attributes because they +are constants. Moreover, the corresponding output tensors are

+
+
    +
  • the new value of “X” (called “X_new”),

  • +
  • the new exponentially-averaged historical gradient (denoted by “V_new”), and

  • +
  • the new exponentially-averaged historical squared gradient (denoted by “H_new”).

  • +
+
+

Those outputs are computed following the pseudo code below.

+

Let “+”, “-”, “*”, and “/” all be element-wise arithmetic operations with numpy-style broadcasting support. The pseudo code to compute those outputs is:

+
+

// Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm.
G_regularized = norm_coefficient * X + G

// Update exponentially-averaged historical gradient.
V_new = alpha * V + (1 - alpha) * G_regularized

// Update exponentially-averaged historical squared gradient.
H_new = beta * H + (1 - beta) * G_regularized * G_regularized

// Compute the element-wise square-root of H_new. V_new will be element-wisely
// divided by H_sqrt for a better update direction.
H_sqrt = Sqrt(H_new) + epsilon

// Compute learning-rate. Note that "alpha**T"/"beta**T" is alpha's/beta's T-th power.
R_adjusted = T > 0 ? R * Sqrt(1 - beta**T) / (1 - alpha**T) : R

// Compute new value of "X".
X_new = X - R_adjusted * V_new / H_sqrt

// Post-update regularization.
X_final = (1 - norm_coefficient_post) * X_new

+
+

If there are multiple inputs to be optimized, the pseudo code will be applied independently to each of them.
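
As a concrete illustration, the pseudo code above can be transcribed almost literally with NumPy. The following sketch only restates the update rule described here (variable names follow the pseudo code); it is not the implementation used by any runtime, and the sample values are arbitrary.

import numpy as np

def adam_step(R, T, X, G, V, H, alpha=0.9, beta=0.999, epsilon=1e-6,
              norm_coefficient=0.0, norm_coefficient_post=0.0):
    # Add gradient of the 0.5 * norm_coefficient * ||X||_2^2 regularization term.
    G_regularized = norm_coefficient * X + G
    # Exponentially-averaged historical gradient and squared gradient.
    V_new = alpha * V + (1 - alpha) * G_regularized
    H_new = beta * H + (1 - beta) * G_regularized * G_regularized
    H_sqrt = np.sqrt(H_new) + epsilon
    # Bias-corrected learning rate (T is the update count).
    R_adjusted = R * np.sqrt(1 - beta ** T) / (1 - alpha ** T) if T > 0 else R
    X_new = X - R_adjusted * V_new / H_sqrt
    # Post-update regularization.
    X_final = (1 - norm_coefficient_post) * X_new
    return X_final, V_new, H_new

X = np.array([1.0, 2.0], dtype=np.float32)
G = np.array([0.1, -0.2], dtype=np.float32)
V = np.zeros_like(X)
H = np.zeros_like(X)
X, V, H = adam_step(R=0.01, T=1, X=X, G=G, V=V, H=H)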

+

Attributes

+
    +
  • alpha: Coefficient of previously accumulated gradient in running average. Default to 0.9. Default value is name: "alpha" f: 0.8999999761581421 type: FLOAT

  • beta: Coefficient of previously accumulated squared-gradient in running average. Default to 0.999. Default value is name: "beta" f: 0.9990000128746033 type: FLOAT

  • epsilon: Small scalar to avoid dividing by zero. Default value is name: "epsilon" f: 9.999999974752427e-07 type: FLOAT

  • norm_coefficient: Regularization coefficient of 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization. Default value is name: "norm_coefficient" f: 0.0 type: FLOAT

  • norm_coefficient_post: Regularization coefficient of 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization. Default value is name: "norm_coefficient_post" f: 0.0 type: FLOAT
+

Inputs

+

Between 3 and 2147483647 inputs.

+
    +
  • R (heterogeneous)T1: The initial learning rate.

  • +
  • T (heterogeneous)T2: The update count of “X”. It should be a scalar.

  • +
  • inputs (variadic)T3: The tensors to be optimized, followed by their respective gradients, followed by their respective accumulated gradients (aka momentum), followed by their respective accumulated squared gradients. For example, to optimize tensors “X_1” and “X_2”, the input list would be [“X_1”, “X_2”, gradient of “X_1”, gradient of “X_2”, accumulated gradient of “X_1”, accumulated gradient of “X_2”, accumulated squared gradient of “X_1”, accumulated squared gradient of “X_2”].

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic)T3: New values of optimized tensors, followed by their respective new accumulated gradients, followed by their respective new accumulated squared gradients. For example, if two tensors “X_1” and “X_2” are optimized, the outputs list would be [new value of “X_1”, new value of “X_2”, new accumulated gradient of “X_1”, new accumulated gradient of “X_2”, new accumulated squared gradient of “X_1”, new accumulated squared gradient of “X_2”].

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(double): Constrain input types to float scalars.

  • +
  • T2 tensor(int64): Constrain input types to 64-bit integer scalars.

  • +
  • T3 tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAdd#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAdd(*args, **kwargs)#
+

Version

+

Onnx name: Add

+

This version of the operator has been available since +version 14.

+

Summary

+

Performs element-wise binary addition (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
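
Like every class in skl2onnx.algebra.onnx_ops, OnnxAdd can be combined with other operators to build a small ONNX graph directly. The snippet below is a minimal sketch: the input name 'X', the sample shapes and the use of onnxruntime to execute the result are illustrative choices, not requirements of the class.

import numpy as np
import onnxruntime as rt
from skl2onnx.algebra.onnx_ops import OnnxAdd

# Add a constant row to the float input 'X' and name the result 'Y'.
op = OnnxAdd('X', np.array([[1.0, 2.0]], dtype=np.float32),
             output_names=['Y'], op_version=14)
# Infer input types/shapes from a sample array and build the ModelProto.
onx = op.to_onnx({'X': np.empty((1, 2), dtype=np.float32)})

sess = rt.InferenceSession(onx.SerializeToString(),
                           providers=['CPUExecutionProvider'])
print(sess.run(None, {'X': np.array([[3.0, 4.0]], dtype=np.float32)}))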
+
+
+
+

OnnxAdd_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAdd_1(*args, **kwargs)#
+

Version

+

Onnx name: Add

+

This version of the operator has been available since +version 1.

+

Summary

+

Performs element-wise binary addition (with limited broadcast support).

+

If necessary the right-hand-side argument will be broadcasted to match the +shape of left-hand-side argument. When broadcasting is specified, the second +tensor can either be of element size 1 (including a scalar tensor and any +tensor with rank equal to or smaller than the first tensor), or having its +shape as a contiguous subset of the first tensor’s shape. The starting of the +mutually equal shape is specified by the argument “axis”, and if it is not set, +suffix matching is assumed. 1-dim expansion doesn’t work yet.

+

For example, the following tensor shapes are supported (with broadcast=1):

+
+

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor
shape(A) = (2, 3, 4, 5), shape(B) = (5,)
shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

+
+

Attribute broadcast=1 needs to be passed to enable broadcasting.
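
To make the axis-based rule concrete, one of the supported cases above can be emulated with NumPy. This is only an illustration of the legacy (opset 1) broadcasting semantics; the array contents are arbitrary.

import numpy as np

A = np.random.rand(2, 3, 4, 5).astype(np.float32)
B = np.random.rand(3, 4).astype(np.float32)

# With broadcast=1 and axis=1, B is aligned with A's dimensions starting at axis 1
# and expanded over the remaining trailing dimension.
C = A + B.reshape(1, 3, 4, 1)
print(C.shape)   # (2, 3, 4, 5)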

+

Attributes

+
    +
  • +
  • broadcast: Pass 1 to enable broadcasting Default value is +name: "broadcast" i: 0 type: INT

  • +
  • +
+

Inputs

+
    +
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • +
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAdd_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAdd_13(*args, **kwargs)#
+

Version

+

Onnx name: Add

+

This version of the operator has been available since +version 13.

+

Summary

+

Performs element-wise binary addition (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxAdd_14#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAdd_14(*args, **kwargs)#
+

Version

+

Onnx name: Add

+

This version of the operator has been available since +version 14.

+

Summary

+

Performs element-wise binary addition (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxAdd_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAdd_6(*args, **kwargs)#
+

Version

+

Onnx name: Add

+

This version of the operator has been available since +version 6.

+

Summary

+

Performs element-wise binary addition (with limited broadcast support).

+

If necessary the right-hand-side argument will be broadcasted to match the +shape of left-hand-side argument. When broadcasting is specified, the second +tensor can either be of element size 1 (including a scalar tensor and any +tensor with rank equal to or smaller than the first tensor), or having its +shape as a contiguous subset of the first tensor’s shape. The starting of the +mutually equal shape is specified by the argument “axis”, and if it is not set, +suffix matching is assumed. 1-dim expansion doesn’t work yet.

+

For example, the following tensor shapes are supported (with broadcast=1):

+
+

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor
shape(A) = (2, 3, 4, 5), shape(B) = (5,)
shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

+
+

Attribute broadcast=1 needs to be passed to enable broadcasting.

+

Attributes

+
    +
  • +
  • broadcast: Pass 1 to enable broadcasting Default value is +name: "broadcast" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • +
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxAdd_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAdd_7(*args, **kwargs)#
+

Version

+

Onnx name: Add

+

This version of the operator has been available since +version 7.

+

Summary

+

Performs element-wise binary addition (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxAnd#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAnd(*args, **kwargs)#
+

Version

+

Onnx name: And

+

This version of the operator has been available since +version 7.

+

Summary

+

Returns the tensor resulted from performing the and logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool): Constrain input to boolean tensor.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxAnd_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAnd_1(*args, **kwargs)#
+

Version

+

Onnx name: And

+

This version of the operator has been available since +version 1.

+

Summary

+

Returns the tensor resulted from performing the and logical operation +elementwise on the input tensors A and B.

+

If broadcasting is enabled, the right-hand-side argument will be broadcasted +to match the shape of left-hand-side argument. See the doc of Add for a +detailed description of the broadcasting rules.

+

Attributes

+
    +
  • +
  • broadcast: Enable broadcasting Default value is +name: "broadcast" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: Left input tensor for the logical operator.

  • +
  • B (heterogeneous)T: Right input tensor for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool): Constrain input to boolean tensor.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxAnd_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAnd_7(*args, **kwargs)#
+

Version

+

Onnx name: And

+

This version of the operator has been available since +version 7.

+

Summary

+

Returns the tensor resulted from performing the and logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool): Constrain input to boolean tensor.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxArgMax#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxArgMax(*args, **kwargs)#
+

Version

+

Onnx name: ArgMax

+

This version of the operator has been available since +version 13.

+

Summary

+

Computes the indices of the max elements of the input tensor’s element along the +provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. +If keepdims equals 0, then the resulting tensor has the reduced dimension pruned. +If select_last_index is True (default False), the index of the last occurrence of the max +is selected if the max appears more than once in the input. Otherwise the index of the +first occurrence is selected. +The type of the output tensor is integer.

+

Attributes

+
    +
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is name: "axis" i: 0 type: INT

  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is name: "keepdims" i: 1 type: INT

  • select_last_index: Whether to select the last index or the first index if the max appears in multiple indices, default is False (first index). Default value is name: "select_last_index" i: 0 type: INT
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
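
The attributes map closely onto NumPy's argmax. The sketch below only illustrates the semantics of keepdims and select_last_index (it assumes NumPy >= 1.22 for the keepdims argument and is not how any runtime implements the operator).

import numpy as np

data = np.array([[2.0, 7.0, 7.0],
                 [9.0, 1.0, 9.0]], dtype=np.float32)

# keepdims=1: the reduced axis is kept with size 1.
first = np.argmax(data, axis=1, keepdims=True)    # [[1], [0]]

# select_last_index=1: pick the last occurrence of the maximum.
flipped = np.argmax(data[:, ::-1], axis=1)
last = data.shape[1] - 1 - flipped                # [2, 2]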
+
+
+
+

OnnxArgMax_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxArgMax_1(*args, **kwargs)#
+

Version

+

Onnx name: ArgMax

+

This version of the operator has been available since +version 1.

+

Summary

+

Computes the indices of the max elements of the input tensor’s elements along the provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned. The type of the output tensor is integer.

+

Attributes

+
    +
  • axis: The axis in which to compute the arg indices. Default value is +name: "axis" i: 0 type: INT

  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxArgMax_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxArgMax_11(*args, **kwargs)#
+

Version

+

Onnx name: ArgMax

+

This version of the operator has been available since +version 11.

+

Summary

+

Computes the indices of the max elements of the input tensor’s element along the +provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. +If keepdims equal 0, then the resulting tensor has the reduced dimension pruned. +The type of the output tensor is integer.

+

Attributes

+
    +
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxArgMax_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxArgMax_12(*args, **kwargs)#
+

Version

+

Onnx name: ArgMax

+

This version of the operator has been available since +version 12.

+

Summary

+

Computes the indices of the max elements of the input tensor’s element along the +provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. +If keepdims equal 0, then the resulting tensor has the reduced dimension pruned. +If select_last_index is True (default False), the index of the last occurrence of the max +is selected if the max appears more than once in the input. Otherwise the index of the +first occurrence is selected. +The type of the output tensor is integer.

+

Attributes

+
    +
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
  • select_last_index: Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index). Default value is +name: "select_last_index" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxArgMax_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxArgMax_13(*args, **kwargs)#
+

Version

+

Onnx name: ArgMax

+

This version of the operator has been available since +version 13.

+

Summary

+

Computes the indices of the max elements of the input tensor’s element along the +provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. +If keepdims equals 0, then the resulting tensor has the reduced dimension pruned. +If select_last_index is True (default False), the index of the last occurrence of the max +is selected if the max appears more than once in the input. Otherwise the index of the +first occurrence is selected. +The type of the output tensor is integer.

+

Attributes

+
    +
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
  • select_last_index: Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index). Default value is +name: "select_last_index" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxArgMin#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxArgMin(*args, **kwargs)#
+

Version

+

Onnx name: ArgMin

+

This version of the operator has been available since +version 13.

+

Summary

+

Computes the indices of the min elements of the input tensor’s element along the +provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. +If keepdims equals 0, then the resulting tensor has the reduced dimension pruned. +If select_last_index is True (default False), the index of the last occurrence of the min +is selected if the min appears more than once in the input. Otherwise the index of the +first occurrence is selected. +The type of the output tensor is integer.

+

Attributes

+
    +
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
  • select_last_index: Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index). Default value is +name: "select_last_index" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxArgMin_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxArgMin_1(*args, **kwargs)#
+

Version

+

Onnx name: ArgMin

+

This version of the operator has been available since +version 1.

+

Summary

+

Computes the indices of the min elements of the input tensor’s elements along the provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned. The type of the output tensor is integer.

+

Attributes

+
    +
  • axis: The axis in which to compute the arg indices. Default value is +name: "axis" i: 0 type: INT

  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxArgMin_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxArgMin_11(*args, **kwargs)#
+

Version

+

Onnx name: ArgMin

+

This version of the operator has been available since +version 11.

+

Summary

+

Computes the indices of the min elements of the input tensor’s element along the +provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. +If keepdims equal 0, then the resulting tensor has the reduced dimension pruned. +The type of the output tensor is integer.

+

Attributes

+
    +
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxArgMin_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxArgMin_12(*args, **kwargs)#
+

Version

+

Onnx name: ArgMin

+

This version of the operator has been available since +version 12.

+

Summary

+

Computes the indices of the min elements of the input tensor’s element along the +provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. +If keepdims equal 0, then the resulting tensor has the reduced dimension pruned. +If select_last_index is True (default False), the index of the last occurrence of the min +is selected if the min appears more than once in the input. Otherwise the index of the +first occurrence is selected. +The type of the output tensor is integer.

+

Attributes

+
    +
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
  • select_last_index: Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index). Default value is +name: "select_last_index" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxArgMin_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxArgMin_13(*args, **kwargs)#
+

Version

+

Onnx name: ArgMin

+

This version of the operator has been available since +version 13.

+

Summary

+

Computes the indices of the min elements of the input tensor’s element along the +provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. +If keepdims equals 0, then the resulting tensor has the reduced dimension pruned. +If select_last_index is True (default False), the index of the last occurrence of the min +is selected if the min appears more than once in the input. Otherwise the index of the +first occurrence is selected. +The type of the output tensor is integer.

+

Attributes

+
    +
  • axis: The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
  • select_last_index: Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index). Default value is +name: "select_last_index" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)tensor(int64): Reduced output tensor with integer data type.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxArrayFeatureExtractor#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxArrayFeatureExtractor(*args, **kwargs)#
+

Version

+

Onnx name: ArrayFeatureExtractor

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Select elements of the input tensor based on the indices passed.

+

The indices are applied to the last axes of the tensor.

+

Inputs

+
    +
  • X (heterogeneous)T: Data to be selected

  • +
  • Y (heterogeneous)tensor(int64): The indices, based on 0 as the first index of any dimension.

  • +
+

Outputs

+
    +
  • Z (heterogeneous)T: Selected output data as an array

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32), tensor(string): The input must be a tensor of a numeric type or string. The output will be of the same tensor type.

  • +
+
+ +
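
In scikit-learn converters this operator is typically used to pick columns out of a matrix. Its effect on a 2D input can be emulated with NumPy fancy indexing along the last axis; this is a sketch of the semantics only, with arbitrary sample data.

import numpy as np

X = np.array([[0.0, 1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0, 7.0]], dtype=np.float32)
Y = np.array([0, 2], dtype=np.int64)   # indices into the last axis

Z = X[:, Y]                            # columns 0 and 2 of every row
# [[0. 2.]
#  [4. 6.]]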
+
+
+
+

OnnxArrayFeatureExtractor_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxArrayFeatureExtractor_1(*args, **kwargs)#
+

Version

+

Onnx name: ArrayFeatureExtractor

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Select elements of the input tensor based on the indices passed.

+

The indices are applied to the last axes of the tensor.

+

Inputs

+
    +
  • X (heterogeneous)T: Data to be selected

  • +
  • Y (heterogeneous)tensor(int64): The indices, based on 0 as the first index of any dimension.

  • +
+

Outputs

+
    +
  • Z (heterogeneous)T: Selected output data as an array

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32), tensor(string): The input must be a tensor of a numeric type or string. The output will be of the same tensor type.

  • +
+
+ +
+
+
+
+

OnnxAsin#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAsin(*args, **kwargs)#
+

Version

+

Onnx name: Asin

+

This version of the operator has been available since +version 7.

+

Summary

+

Calculates the arcsine (inverse of sine) of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The arcsine of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAsin_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAsin_7(*args, **kwargs)#
+

Version

+

Onnx name: Asin

+

This version of the operator has been available since +version 7.

+

Summary

+

Calculates the arcsine (inverse of sine) of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The arcsine of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAsinh#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAsinh(*args, **kwargs)#
+

Version

+

Onnx name: Asinh

+

This version of the operator has been available since +version 9.

+

Summary

+

Calculates the hyperbolic arcsine of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic arcsine values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAsinh_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAsinh_9(*args, **kwargs)#
+

Version

+

Onnx name: Asinh

+

This version of the operator has been available since +version 9.

+

Summary

+

Calculates the hyperbolic arcsine of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic arcsine values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAtan#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAtan(*args, **kwargs)#
+

Version

+

Onnx name: Atan

+

This version of the operator has been available since +version 7.

+

Summary

+

Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The arctangent of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAtan_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAtan_7(*args, **kwargs)#
+

Version

+

Onnx name: Atan

+

This version of the operator has been available since +version 7.

+

Summary

+

Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The arctangent of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAtanh#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAtanh(*args, **kwargs)#
+

Version

+

Onnx name: Atanh

+

This version of the operator has been available since +version 9.

+

Summary

+

Calculates the hyperbolic arctangent of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic arctangent values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAtanh_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAtanh_9(*args, **kwargs)#
+

Version

+

Onnx name: Atanh

+

This version of the operator has been available since +version 9.

+

Summary

+

Calculates the hyperbolic arctangent of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic arctangent values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAveragePool#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAveragePool(*args, **kwargs)#
+

Version

+

Onnx name: AveragePool

+

This version of the operator has been available since +version 19.

+

Summary

+

AveragePool consumes an input tensor X and applies average pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Average pooling consists of computing the average over all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is computed as follows:

+
output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)
+
+
+
+

or#

+
+

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

+
+

if ceil_mode is enabled pad_shape[i] is the sum of pads along axis i.

+

auto_pad is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:

+
VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
+SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+
+
+

And pad shape will be following if SAME_UPPER or SAME_LOWER:

+
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]
+
+
+

The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).
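
The floor/ceil formulas above can be checked with a few lines of Python. The helper below is a sketch that only covers explicit padding (pads holds the summed pads per spatial axis; auto_pad and dilations are not handled).

import math

def avgpool_output_shape(input_shape, kernel_shape, strides, pads, ceil_mode=False):
    # Apply output_spatial_shape[i] = round((input[i] + pad[i] - kernel[i]) / stride[i] + 1).
    rounding = math.ceil if ceil_mode else math.floor
    return [int(rounding((i + p - k) / s + 1))
            for i, k, s, p in zip(input_shape, kernel_shape, strides, pads)]

# 32x32 input, 3x3 kernel, stride 2, one pixel of padding on each side (pad sum = 2).
print(avgpool_output_shape([32, 32], [3, 3], [2, 2], [2, 2]))                  # [16, 16]
print(avgpool_output_shape([32, 32], [3, 3], [2, 2], [2, 2], ceil_mode=True))  # [17, 17]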

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is +name: "ceil_mode" i: 0 type: INT

  • +
  • count_include_pad: Whether include pad pixels when calculating values for the edges. Default is 0, doesn’t count include pad. Default value is +name: "count_include_pad" i: 0 type: INT

  • +
  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+
+ +
+
+
+
+

OnnxAveragePool_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAveragePool_1(*args, **kwargs)#
+

Version

+

Onnx name: AveragePool

+

This version of the operator has been available since +version 1.

+

Summary

+

AveragePool consumes an input tensor X and applies average pooling across +the tensor according to kernel sizes, stride sizes, and pad lengths. +average pooling consisting of computing the average on all values of a +subset of the input tensor according to the kernel size and downsampling the +data into the output tensor Y for further processing. The output spatial shape will be following:

+
output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)
+
+* pad_shape[i] is sum of pads along axis i
+
+
+

auto_pad is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:

+
VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
+SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+
+
+

And pad shape will be following if SAME_UPPER or SAME_LOWER:

+
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]
+
+
+

The output of each pooling window is divided by the number of elements exclude pad.

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxAveragePool_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAveragePool_10(*args, **kwargs)#
+

Version

+

Onnx name: AveragePool

+

This version of the operator has been available since +version 10.

+

Summary

+

AveragePool consumes an input tensor X and applies average pooling across +the tensor according to kernel sizes, stride sizes, and pad lengths. +average pooling consisting of computing the average on all values of a +subset of the input tensor according to the kernel size and downsampling the +data into the output tensor Y for further processing. The output spatial shape will be following:

+
output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)
+
+
+
+

or#

+
+

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

+
+

if ceil_mode is enabled

+
* pad_shape[i] is sum of pads along axis i
+
+
+

auto_pad is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:

+
VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
+SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+
+
+

And pad shape will be following if SAME_UPPER or SAME_LOWER:

+
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]
+
+
+

The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is +name: "ceil_mode" i: 0 type: INT

  • +
  • count_include_pad: Whether include pad pixels when calculating values for the edges. Default is 0, doesn’t count include pad. Default value is +name: "count_include_pad" i: 0 type: INT

  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+
+ +
+
+
+
+

OnnxAveragePool_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAveragePool_11(*args, **kwargs)#
+

Version

+

Onnx name: AveragePool

+

This version of the operator has been available since +version 11.

+

Summary

+

AveragePool consumes an input tensor X and applies average pooling across +the tensor according to kernel sizes, stride sizes, and pad lengths. +average pooling consisting of computing the average on all values of a +subset of the input tensor according to the kernel size and downsampling the +data into the output tensor Y for further processing. The output spatial shape will be following:

+
output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)
+
+
+
+

or#

+
+

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

+
+

if ceil_mode is enabled

+
* pad_shape[i] is sum of pads along axis i
+
+
+

auto_pad is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:

+
VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])
+SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+
+
+

And pad shape will be following if SAME_UPPER or SAME_LOWER:

+
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]
+
+
+

The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is +name: "ceil_mode" i: 0 type: INT

  • +
  • count_include_pad: Whether include pad pixels when calculating values for the edges. Default is 0, doesn’t count include pad. Default value is +name: "count_include_pad" i: 0 type: INT

  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+
+ +
+
+
+
+

OnnxAveragePool_19#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAveragePool_19(*args, **kwargs)#
+

Version

+

Onnx name: AveragePool

+

This version of the operator has been available since +version 19.

+

Summary

+

AveragePool consumes an input tensor X and applies average pooling across +the tensor according to kernel sizes, stride sizes, and pad lengths. +average pooling consisting of computing the average on all values of a +subset of the input tensor according to the kernel size and downsampling the +data into the output tensor Y for further processing. The output spatial shape will be following:

+
output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)
+
+
+
+

or#

+
+

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

+
+

if ceil_mode is enabled pad_shape[i] is the sum of pads along axis i.

+

auto_pad is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:

+
VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
+SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+
+
+

And pad shape will be following if SAME_UPPER or SAME_LOWER:

+
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]
+
+
+

The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is +name: "ceil_mode" i: 0 type: INT

  • +
  • count_include_pad: Whether include pad pixels when calculating values for the edges. Default is 0, doesn’t count include pad. Default value is +name: "count_include_pad" i: 0 type: INT

  • +
  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+
+ +
+
+
+
+

OnnxAveragePool_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxAveragePool_7(*args, **kwargs)#
+

Version

+

Onnx name: AveragePool

+

This version of the operator has been available since +version 7.

+

Summary

+

AveragePool consumes an input tensor X and applies average pooling across +the tensor according to kernel sizes, stride sizes, and pad lengths. +average pooling consisting of computing the average on all values of a +subset of the input tensor according to the kernel size and downsampling the +data into the output tensor Y for further processing. The output spatial shape will be following:

+
output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)
+
+* pad_shape[i] is sum of pads along axis i
+
+
+

auto_pad is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:

+
VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
+SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+
+
+

And pad shape will be following if SAME_UPPER or SAME_LOWER:

+
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]
+
+
+

The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • count_include_pad: Whether include pad pixels when calculating values for the edges. Default is 0, doesn’t count include pad. Default value is +name: "count_include_pad" i: 0 type: INT

  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxBatchNormalization#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization(*args, **kwargs)#
+

Version

+

Onnx name: BatchNormalization

+

This version of the operator has been available since +version 15.

+

Summary

+

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. Depending on the mode it is being run in, there are five required inputs ‘X’, ‘scale’, ‘B’, ‘input_mean’ and ‘input_var’. Note that ‘input_mean’ and ‘input_var’ are expected to be the estimated statistics in inference mode (training_mode=False, default), and the running statistics in training mode (training_mode=True). There are multiple cases for the number of outputs, which we list below:

  • Output case #1: Y, running_mean, running_var (training_mode=True)

  • Output case #2: Y (training_mode=False)

When training_mode=False, extra outputs are invalid. The outputs are updated as follows when training_mode=True:

running_mean = input_mean * momentum + current_mean * (1 - momentum)
running_var = input_var * momentum + current_var * (1 - momentum)

Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B

where:

current_mean = ReduceMean(X, axis=all_except_channel_index)
current_var = ReduceVar(X, axis=all_except_channel_index)

Notice that ReduceVar refers to the population variance, and it equals sum(sqrd(x_i - x_avg)) / N, where N is the population size (this formula does not use sample size N - 1).

+

The computation of ReduceMean and ReduceVar uses float to avoid overflow for float16 inputs.

+

When training_mode=False:

+
Y = (X - input_mean) / sqrt(input_var + epsilon) * scale + B
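To make the two modes concrete, here is a minimal NumPy sketch of the formulas above (illustrative only; the runtime implementation differs):

import numpy as np

def batchnorm(X, scale, B, input_mean, input_var, epsilon=1e-5,
              momentum=0.9, training_mode=False):
    """NumPy sketch of the BatchNormalization formulas above (channel axis 1)."""
    if not training_mode:
        # Inference: use the provided (estimated) statistics.
        mean, var = input_mean, input_var
        running_mean, running_var = input_mean, input_var
    else:
        # Training: statistics over all axes except the channel axis.
        axes = (0,) + tuple(range(2, X.ndim))
        mean = X.mean(axis=axes)
        var = X.var(axis=axes)          # population variance (divides by N)
        running_mean = input_mean * momentum + mean * (1 - momentum)
        running_var = input_var * momentum + var * (1 - momentum)
    shape = (1, -1) + (1,) * (X.ndim - 2)   # broadcast per-channel parameters
    Y = (X - mean.reshape(shape)) / np.sqrt(var.reshape(shape) + epsilon)
    Y = Y * scale.reshape(shape) + B.reshape(shape)
    return Y, running_mean, running_var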

For previous (deprecated) non-spatial cases, implementors are suggested to flatten the input shape to (N x C * D1 * D2 * … * Dn) before a BatchNormalization Op. This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

  • epsilon: The epsilon value to use to avoid division by zero. Default value is name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • momentum: Factor used in computing the running mean and variance, e.g., running_mean = running_mean * momentum + mean * (1 - momentum). Default value is name: "momentum" f: 0.8999999761581421 type: FLOAT

  • training_mode: If set to true, it indicates BatchNormalization is being used for training, and outputs 1, 2, 3, and 4 would be populated. Default value is name: "training_mode" i: 0 type: INT

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size, C is the number of channels. Statistics are computed for every channel of C over N and D1 to Dn dimensions. For image data, input dimensions become (N x C x H x W). The op also accepts single dimension input of size N in which case C is assumed to be 1

  • +
  • scale (heterogeneous)T1: Scale tensor of shape (C).

  • +
  • B (heterogeneous)T1: Bias tensor of shape (C).

  • +
  • input_mean (heterogeneous)T2: running (training) or estimated (testing) mean tensor of shape (C).

  • +
  • input_var (heterogeneous)T2: running (training) or estimated (testing) variance tensor of shape (C).

  • +
+

Outputs

+

Between 1 and 3 outputs.

+
    +
  • Y (heterogeneous)T: The output tensor of the same shape as X

  • +
  • running_mean (optional, heterogeneous)T2: The running mean after the BatchNormalization operator.

  • +
  • running_var (optional, heterogeneous)T2: The running variance after the BatchNormalization operator. This op uses the population size (N) for calculating variance, and not the sample size N-1.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain scale and bias types to float tensors.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain mean and variance types to float tensors.


OnnxBatchNormalization_1#

class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_1(*args, **kwargs)#

Version

Onnx name: BatchNormalization

This version of the operator has been available since version 1.

+

Summary

+

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. Depending on the mode it is being run in, there are multiple cases for the number of outputs, which we list below:

+

Output case #1: Y, mean, var, saved_mean, saved_var (training mode)
Output case #2: Y (test mode)

+

Attributes

+
    +
  • +
  • epsilon: The epsilon value to use to avoid division by zero, default is 1e-5f. Default value is +name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • +
  • is_test: If set to nonzero, run spatial batch normalization in test mode, default is 0. Default value is +name: "is_test" i: 0 type: INT

  • +
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum), default is 0.9f. Default value is +name: "momentum" f: 0.8999999761581421 type: FLOAT

  • +
  • spatial: If true, compute the mean and variance across all spatial elements If false, compute the mean and variance across per feature.Default is 1. Default value is +name: "spatial" i: 1 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: The input 4-dimensional tensor of shape NCHW.

  • +
  • scale (heterogeneous)T: The scale as a 1-dimensional tensor of size C to be applied to the output.

  • +
  • B (heterogeneous)T: The bias as a 1-dimensional tensor of size C to be applied to the output.

  • +
  • mean (heterogeneous)T: The running mean (training) or the estimated mean (testing) as a 1-dimensional tensor of size C.

  • +
  • var (heterogeneous)T: The running variance (training) or the estimated variance (testing) as a 1-dimensional tensor of size C.

  • +
+

Outputs

+

Between 1 and 5 outputs.

+
    +
  • Y (heterogeneous)T: The output 4-dimensional tensor of the same shape as X.

  • +
  • mean (optional, heterogeneous)T: The running mean after the BatchNormalization operator. Must be in-place with the input mean. Should not be used for testing.

  • +
  • var (optional, heterogeneous)T: The running variance after the BatchNormalization operator. Must be in-place with the input var. Should not be used for testing.

  • +
  • saved_mean (optional, heterogeneous)T: Saved mean used during training to speed up gradient computation. Should not be used for testing.

  • +
  • saved_var (optional, heterogeneous)T: Saved variance used during training to speed up gradient computation. Should not be used for testing.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.


OnnxBatchNormalization_14#

class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_14(*args, **kwargs)#

Version

Onnx name: BatchNormalization

This version of the operator has been available since version 14.

+

Summary

+

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. Depending on the mode it is being run in, there are five required inputs ‘X’, ‘scale’, ‘B’, ‘input_mean’ and ‘input_var’. Note that ‘input_mean’ and ‘input_var’ are expected to be the estimated statistics in inference mode (training_mode=False, default), and the running statistics in training mode (training_mode=True). There are multiple cases for the number of outputs, which we list below:

+

Output case #1: Y, running_mean, running_var (training_mode=True)
Output case #2: Y (training_mode=False)

+

When training_mode=False, extra outputs are invalid. The outputs are updated as follows when training_mode=True:

running_mean = input_mean * momentum + current_mean * (1 - momentum)
running_var = input_var * momentum + current_var * (1 - momentum)

Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B

where:

current_mean = ReduceMean(X, axis=all_except_channel_index)
current_var = ReduceVar(X, axis=all_except_channel_index)

Notice that ReduceVar refers to the population variance, and it equals sum(sqrd(x_i - x_avg)) / N, where N is the population size (this formula does not use sample size N - 1).

When training_mode=False:

+
Y = (X - input_mean) / sqrt(input_var + epsilon) * scale + B
+
+
+

For previous (deprecated) non-spatial cases, implementors are suggested to flatten the input shape to (N x C * D1 * D2 * … * Dn) before a BatchNormalization Op. This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

+
    +
  • epsilon: The epsilon value to use to avoid division by zero. Default value is +name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • +
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum). Default value is +name: "momentum" f: 0.8999999761581421 type: FLOAT

  • +
  • training_mode: If set to true, it indicates BatchNormalization is being used for training, and outputs 1, 2, 3, and 4 would be populated. Default value is +name: "training_mode" i: 0 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size, C is the number of channels. Statistics are computed for every channel of C over N and D1 to Dn dimensions. For image data, input dimensions become (N x C x H x W). The op also accepts single dimension input of size N in which case C is assumed to be 1

  • +
  • scale (heterogeneous)T: Scale tensor of shape (C).

  • +
  • B (heterogeneous)T: Bias tensor of shape (C).

  • +
  • input_mean (heterogeneous)U: running (training) or estimated (testing) mean tensor of shape (C).

  • +
  • input_var (heterogeneous)U: running (training) or estimated (testing) variance tensor of shape (C).

  • +
+

Outputs

+

Between 1 and 3 outputs.

+
    +
  • Y (heterogeneous)T: The output tensor of the same shape as X

  • +
  • running_mean (optional, heterogeneous)U: The running mean after the BatchNormalization operator.

  • +
  • running_var (optional, heterogeneous)U: The running variance after the BatchNormalization operator. This op uses the population size (N) for calculating variance, and not the sample size N-1.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
  • U tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain mean and variance types to float tensors. It allows all float type for U.


OnnxBatchNormalization_15#

class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_15(*args, **kwargs)#

Version

Onnx name: BatchNormalization

This version of the operator has been available since version 15.

+

Summary

+

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. Depending on the mode it is being run in, there are five required inputs ‘X’, ‘scale’, ‘B’, ‘input_mean’ and ‘input_var’. Note that ‘input_mean’ and ‘input_var’ are expected to be the estimated statistics in inference mode (training_mode=False, default), and the running statistics in training mode (training_mode=True). There are multiple cases for the number of outputs, which we list below:

  • Output case #1: Y, running_mean, running_var (training_mode=True)

  • Output case #2: Y (training_mode=False)

When training_mode=False, extra outputs are invalid. The outputs are updated as follows when training_mode=True:

running_mean = input_mean * momentum + current_mean * (1 - momentum)
running_var = input_var * momentum + current_var * (1 - momentum)

Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B

where:

current_mean = ReduceMean(X, axis=all_except_channel_index)
current_var = ReduceVar(X, axis=all_except_channel_index)

Notice that ReduceVar refers to the population variance, and it equals sum(sqrd(x_i - x_avg)) / N, where N is the population size (this formula does not use sample size N - 1).

+

The computation of ReduceMean and ReduceVar uses float to avoid overflow for float16 inputs.

+

When training_mode=False:

+
Y = (X - input_mean) / sqrt(input_var + epsilon) * scale + B

For previous (deprecated) non-spatial cases, implementors are suggested to flatten the input shape to (N x C * D1 * D2 * … * Dn) before a BatchNormalization Op. This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

+
    +
  • epsilon: The epsilon value to use to avoid division by zero. Default value is +name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • +
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum). Default value is +name: "momentum" f: 0.8999999761581421 type: FLOAT

  • +
  • training_mode: If set to true, it indicates BatchNormalization is being used for training, and outputs 1, 2, 3, and 4 would be populated. Default value is +name: "training_mode" i: 0 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size, C is the number of channels. Statistics are computed for every channel of C over N and D1 to Dn dimensions. For image data, input dimensions become (N x C x H x W). The op also accepts single dimension input of size N in which case C is assumed to be 1

  • +
  • scale (heterogeneous)T1: Scale tensor of shape (C).

  • +
  • B (heterogeneous)T1: Bias tensor of shape (C).

  • +
  • input_mean (heterogeneous)T2: running (training) or estimated (testing) mean tensor of shape (C).

  • +
  • input_var (heterogeneous)T2: running (training) or estimated (testing) variance tensor of shape (C).

  • +
+

Outputs

+

Between 1 and 3 outputs.

+
    +
  • Y (heterogeneous)T: The output tensor of the same shape as X

  • +
  • running_mean (optional, heterogeneous)T2: The running mean after the BatchNormalization operator.

  • +
  • running_var (optional, heterogeneous)T2: The running variance after the BatchNormalization operator. This op uses the population size (N) for calculating variance, and not the sample size N-1.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain scale and bias types to float tensors.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain mean and variance types to float tensors.


OnnxBatchNormalization_6#

class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_6(*args, **kwargs)#

Version

Onnx name: BatchNormalization

This version of the operator has been available since version 6.

+

Summary

+

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. Depending on the mode it is being run in, there are multiple cases for the number of outputs, which we list below:

+

Output case #1: Y, mean, var, saved_mean, saved_var (training mode)
Output case #2: Y (test mode)

+

Attributes

+
    +
  • epsilon: The epsilon value to use to avoid division by zero, default is 1e-5f. Default value is +name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • +
  • is_test: If set to nonzero, run spatial batch normalization in test mode, default is 0. Default value is +name: "is_test" i: 0 type: INT

  • +
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum), default is 0.9f. Default value is +name: "momentum" f: 0.8999999761581421 type: FLOAT

  • +
  • spatial: If true, compute the mean and variance across all spatial elements If false, compute the mean and variance across per feature.Default is 1. Default value is +name: "spatial" i: 1 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
  • scale (heterogeneous)T: The scale as a 1-dimensional tensor of size C to be applied to the output.

  • +
  • B (heterogeneous)T: The bias as a 1-dimensional tensor of size C to be applied to the output.

  • +
  • mean (heterogeneous)T: The running mean (training) or the estimated mean (testing) as a 1-dimensional tensor of size C.

  • +
  • var (heterogeneous)T: The running variance (training) or the estimated variance (testing) as a 1-dimensional tensor of size C.

  • +
+

Outputs

+

Between 1 and 5 outputs.

+
    +
  • Y (heterogeneous)T: The output tensor of the same shape as X.

  • +
  • mean (optional, heterogeneous)T: The running mean after the BatchNormalization operator. Must be in-place with the input mean. Should not be used for testing.

  • +
  • var (optional, heterogeneous)T: The running variance after the BatchNormalization operator. Must be in-place with the input var. Should not be used for testing.

  • +
  • saved_mean (optional, heterogeneous)T: Saved mean used during training to speed up gradient computation. Should not be used for testing.

  • +
  • saved_var (optional, heterogeneous)T: Saved variance used during training to speed up gradient computation. Should not be used for testing.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.


OnnxBatchNormalization_7#

class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_7(*args, **kwargs)#

Version

Onnx name: BatchNormalization

This version of the operator has been available since version 7.

+

Summary

+

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. Depending on the mode it is being run in, there are multiple cases for the number of outputs, which we list below:

+

Output case #1: Y, mean, var, saved_mean, saved_var (training mode)
Output case #2: Y (test mode)

+
+

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+
+

Attributes

+
    +
  • epsilon: The epsilon value to use to avoid division by zero. Default value is +name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • +
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum). Default value is +name: "momentum" f: 0.8999999761581421 type: FLOAT

  • +
  • spatial: If true, compute the mean and variance across per activation. If false, compute the mean and variance across per feature over each mini-batch. Default value is +name: "spatial" i: 1 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
  • scale (heterogeneous)T: If spatial is true, the dimension of scale is (C). If spatial is false, the dimensions of scale are (C x D1 x … x Dn)

  • +
  • B (heterogeneous)T: If spatial is true, the dimension of bias is (C). If spatial is false, the dimensions of bias are (C x D1 x … x Dn)

  • +
  • mean (heterogeneous)T: If spatial is true, the dimension of the running mean (training) or the estimated mean (testing) is (C). If spatial is false, the dimensions of the running mean (training) or the estimated mean (testing) are (C x D1 x … x Dn).

  • +
  • var (heterogeneous)T: If spatial is true, the dimension of the running variance(training) or the estimated variance (testing) is (C). If spatial is false, the dimensions of the running variance(training) or the estimated variance (testing) are (C x D1 x … x Dn).

  • +
+

Outputs

+

Between 1 and 5 outputs.

+
    +
  • Y (heterogeneous)T: The output tensor of the same shape as X

  • +
  • mean (optional, heterogeneous)T: The running mean after the BatchNormalization operator.

  • +
  • var (optional, heterogeneous)T: The running variance after the BatchNormalization operator.

  • +
  • saved_mean (optional, heterogeneous)T: Saved mean used during training to speed up gradient computation.

  • +
  • saved_var (optional, heterogeneous)T: Saved variance used during training to speed up gradient computation.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.


OnnxBatchNormalization_9#

class skl2onnx.algebra.onnx_ops.OnnxBatchNormalization_9(*args, **kwargs)#

Version

Onnx name: BatchNormalization

This version of the operator has been available since version 9.

+

Summary

+

Carries out batch normalization as described in the paper https://arxiv.org/abs/1502.03167. Depending on the mode it is being run in, there are multiple cases for the number of outputs, which we list below:

+

Output case #1: Y, mean, var, saved_mean, saved_var (training mode)
Output case #2: Y (test mode)

+

For previous (deprecated) non-spatial cases, implementors are suggested to flatten the input shape to (N x C*D1*D2 ..*Dn) before a BatchNormalization Op. This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

+
    +
  • epsilon: The epsilon value to use to avoid division by zero. Default value is +name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • +
  • momentum: Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum). Default value is +name: "momentum" f: 0.8999999761581421 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size, C is the number of channels. Statistics are computed for every channel of C over N and D1 to Dn dimensions. For image data, input dimensions become (N x C x H x W). The op also accepts single dimension input of size N in which case C is assumed to be 1

  • +
  • scale (heterogeneous)T: Scale tensor of shape (C).

  • +
  • B (heterogeneous)T: Bias tensor of shape (C).

  • +
  • mean (heterogeneous)T: running (training) or estimated (testing) mean tensor of shape (C).

  • +
  • var (heterogeneous)T: running (training) or estimated (testing) variance tensor of shape (C).

  • +
+

Outputs

+

Between 1 and 5 outputs.

+
    +
  • Y (heterogeneous)T: The output tensor of the same shape as X

  • +
  • mean (optional, heterogeneous)T: The running mean after the BatchNormalization operator.

  • +
  • var (optional, heterogeneous)T: The running variance after the BatchNormalization operator.

  • +
  • saved_mean (optional, heterogeneous)T: Saved mean used during training to speed up gradient computation.

  • +
  • saved_var (optional, heterogeneous)T: Saved variance used during training to speed up gradient computation.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.


OnnxBernoulli#

class skl2onnx.algebra.onnx_ops.OnnxBernoulli(*args, **kwargs)#

Version

Onnx name: Bernoulli

This version of the operator has been available since version 15.

+

Summary

+

Draws binary random numbers (0 or 1) from a Bernoulli distribution. The input tensor should be a tensor containing probabilities p (a value in the range [0,1]) to be used for drawing the binary random number, where an output of 1 is produced with probability p and an output of 0 is produced with probability (1-p).

+

This operator is non-deterministic and may not produce the same values in different implementations (even if a seed is specified).
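A minimal NumPy sketch of the sampling semantics (purely illustrative; actual backends use their own random generators):

import numpy as np

rng = np.random.default_rng(0)          # seed only makes *this* sketch repeatable
p = np.array([0.1, 0.5, 0.9], dtype=np.float32)

# output[i] is 1 with probability p[i], else 0
sample = (rng.random(p.shape) < p).astype(np.float32)
print(sample)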

+

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T1: All values in input have to be in the range:[0, 1].

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: The returned output tensor only has values 0 or 1, same shape as input tensor.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bool): Constrain output types to all numeric tensors and bool tensors.


OnnxBernoulli_15#

class skl2onnx.algebra.onnx_ops.OnnxBernoulli_15(*args, **kwargs)#

Version

Onnx name: Bernoulli

This version of the operator has been available since version 15.

+

Summary

+

Draws binary random numbers (0 or 1) from a Bernoulli distribution. The input tensor should be a tensor +containing probabilities p (a value in the range [0,1]) to be used for drawing the binary random number, +where an output of 1 is produced with probability p and an output of 0 is produced with probability (1-p).

+

This operator is non-deterministic and may not produce the same values in different +implementations (even if a seed is specified).

+

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T1: All values in input have to be in the range:[0, 1].

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: The returned output tensor only has values 0 or 1, same shape as input tensor.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bool): Constrain output types to all numeric tensors and bool tensors.


OnnxBinarizer#

class skl2onnx.algebra.onnx_ops.OnnxBinarizer(*args, **kwargs)#

Version

Onnx name: Binarizer

This version of the operator has been available since version 1 of domain ai.onnx.ml.

+

Summary

+

Maps the values of the input tensor to either 0 or 1, element-wise, based on the outcome of a comparison against a threshold value.

+

Attributes

  • threshold: Values greater than this are mapped to 1, others to 0. Default value is name: "threshold" f: 0.0 type: FLOAT
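Element-wise, this is equivalent to the following NumPy one-liner (an illustrative sketch, not the converter code):

import numpy as np

def binarize(X, threshold=0.0):
    # Values strictly greater than the threshold map to 1, all others to 0,
    # keeping the input tensor's type.
    return (X > threshold).astype(X.dtype)

X = np.array([[-1.5, 0.0, 2.3]], dtype=np.float32)
print(binarize(X, threshold=0.5))   # [[0. 0. 1.]]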

Inputs

+
    +
  • X (heterogeneous)T: Data to be binarized

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Binarized output data

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type. The output will be of the same tensor type.


OnnxBinarizer_1#

class skl2onnx.algebra.onnx_ops.OnnxBinarizer_1(*args, **kwargs)#

Version

Onnx name: Binarizer

This version of the operator has been available since version 1 of domain ai.onnx.ml.

+

Summary

+

Maps the values of the input tensor to either 0 or 1, element-wise, based on the outcome of a comparison against a threshold value.

+

Attributes

+
    +
  • threshold: Values greater than this are mapped to 1, others to 0. Default value is +name: "threshold" f: 0.0 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Data to be binarized

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Binarized output data

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type. The output will be of the same tensor type.


OnnxBitShift#

class skl2onnx.algebra.onnx_ops.OnnxBitShift(*args, **kwargs)#

Version

Onnx name: BitShift

This version of the operator has been available since version 11.

+

Summary

+

Bitwise shift operator performs an element-wise operation. For each input element, if the attribute “direction” is “RIGHT”, this operator moves its binary representation toward the right side so that the input value is effectively decreased. If the attribute “direction” is “LEFT”, the bits of the binary representation move toward the left side, which results in an increase of its actual value. The input X is the tensor to be shifted and the other input Y specifies the amounts of shifting. For example, if “direction” is “RIGHT”, X is [1, 4], and Y is [1, 1], the corresponding output Z would be [0, 2]. If “direction” is “LEFT” with X=[1, 2] and Y=[1, 2], the corresponding output Z would be [2, 8].

+

Because this operator supports Numpy-style broadcasting, X’s and Y’s shapes are not necessarily identical. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

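The example values above can be reproduced with NumPy's shift operators (an illustrative sketch of the semantics, assuming unsigned integer inputs):

import numpy as np

X = np.array([1, 4], dtype=np.uint8)
Y = np.array([1, 1], dtype=np.uint8)
print(X >> Y)    # direction="RIGHT" -> [0 2]

X = np.array([1, 2], dtype=np.uint8)
Y = np.array([1, 2], dtype=np.uint8)
print(X << Y)    # direction="LEFT"  -> [2 8]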
+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: First operand, input to be shifted.

  • +
  • Y (heterogeneous)T: Second operand, amounts of shift.

  • +
+

Outputs

+
    +
  • Z (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64): Constrain input and output types to integer tensors.


OnnxBitShift_11#

class skl2onnx.algebra.onnx_ops.OnnxBitShift_11(*args, **kwargs)#

Version

Onnx name: BitShift

This version of the operator has been available since version 11.

+

Summary

+

Bitwise shift operator performs element-wise operation. For each input element, if the +attribute “direction” is “RIGHT”, this operator moves its binary representation toward +the right side so that the input value is effectively decreased. If the attribute “direction” +is “LEFT”, bits of binary representation moves toward the left side, which results the +increase of its actual value. The input X is the tensor to be shifted and another input +Y specifies the amounts of shifting. For example, if “direction” is “Right”, X is [1, 4], +and S is [1, 1], the corresponding output Z would be [0, 2]. If “direction” is “LEFT” with +X=[1, 2] and S=[1, 2], the corresponding output Y would be [2, 8].

+

Because this operator supports Numpy-style broadcasting, X’s and Y’s shapes are +not necessarily identical. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: First operand, input to be shifted.

  • +
  • Y (heterogeneous)T: Second operand, amounts of shift.

  • +
+

Outputs

+
    +
  • Z (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64): Constrain input and output types to integer tensors.


OnnxBitwiseAnd#

class skl2onnx.algebra.onnx_ops.OnnxBitwiseAnd(*args, **kwargs)#

Version

Onnx name: BitwiseAnd

This version of the operator has been available since version 18.

+

Summary

+

Returns the tensor resulting from performing the bitwise and operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

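The semantics match NumPy's integer bitwise operators; a short illustrative sketch covering this operator and the related BitwiseOr/BitwiseXor/BitwiseNot below:

import numpy as np

A = np.array([0b1100, 0b1010], dtype=np.uint8)
B = np.array([0b1010], dtype=np.uint8)       # broadcast against A

print(A & B)   # BitwiseAnd -> [ 8 10]
print(A | B)   # BitwiseOr  -> [14 10]
print(A ^ B)   # BitwiseXor -> [ 6  0]
print(~A)      # BitwiseNot -> [243 245] for uint8 (bit complement)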
+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the bitwise operator.

  • +
  • B (heterogeneous)T: Second input operand for the bitwise operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64): Constrain input to integer tensors.


OnnxBitwiseAnd_18#

class skl2onnx.algebra.onnx_ops.OnnxBitwiseAnd_18(*args, **kwargs)#

Version

Onnx name: BitwiseAnd

This version of the operator has been available since version 18.

+

Summary

+

Returns the tensor resulting from performing the bitwise and operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the bitwise operator.

  • +
  • B (heterogeneous)T: Second input operand for the bitwise operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64): Constrain input to integer tensors.


OnnxBitwiseNot#

class skl2onnx.algebra.onnx_ops.OnnxBitwiseNot(*args, **kwargs)#

Version

Onnx name: BitwiseNot

This version of the operator has been available since version 18.

+

Summary

+

Returns the bitwise not of the input tensor element-wise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64): Constrain input/output to integer tensors.


OnnxBitwiseNot_18#

class skl2onnx.algebra.onnx_ops.OnnxBitwiseNot_18(*args, **kwargs)#

Version

Onnx name: BitwiseNot

This version of the operator has been available since version 18.

+

Summary

+

Returns the bitwise not of the input tensor element-wise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64): Constrain input/output to integer tensors.


OnnxBitwiseOr#

class skl2onnx.algebra.onnx_ops.OnnxBitwiseOr(*args, **kwargs)#

Version

Onnx name: BitwiseOr

This version of the operator has been available since version 18.

+

Summary

+

Returns the tensor resulting from performing the bitwise or operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the bitwise operator.

  • +
  • B (heterogeneous)T: Second input operand for the bitwise operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64): Constrain input to integer tensors.


OnnxBitwiseOr_18#

class skl2onnx.algebra.onnx_ops.OnnxBitwiseOr_18(*args, **kwargs)#

Version

Onnx name: BitwiseOr

This version of the operator has been available since version 18.

+

Summary

+

Returns the tensor resulting from performing the bitwise or operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the bitwise operator.

  • +
  • B (heterogeneous)T: Second input operand for the bitwise operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64): Constrain input to integer tensors.


OnnxBitwiseXor#

class skl2onnx.algebra.onnx_ops.OnnxBitwiseXor(*args, **kwargs)#

Version

Onnx name: BitwiseXor

This version of the operator has been available since version 18.

+

Summary

+

Returns the tensor resulting from performing the bitwise xor operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the bitwise operator.

  • +
  • B (heterogeneous)T: Second input operand for the bitwise operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64): Constrain input to integer tensors.


OnnxBitwiseXor_18#

class skl2onnx.algebra.onnx_ops.OnnxBitwiseXor_18(*args, **kwargs)#

Version

Onnx name: BitwiseXor

This version of the operator has been available since version 18.

+

Summary

+

Returns the tensor resulting from performing the bitwise xor operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the bitwise operator.

  • +
  • B (heterogeneous)T: Second input operand for the bitwise operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64): Constrain input to integer tensors.


OnnxBlackmanWindow#

class skl2onnx.algebra.onnx_ops.OnnxBlackmanWindow(*args, **kwargs)#

Version

Onnx name: BlackmanWindow

This version of the operator has been available since version 17.

+

Summary

+

Generates a Blackman window as described in the paper https://ieeexplore.ieee.org/document/1455106.

+

Attributes

  • output_datatype: The data type of the output tensor. Strictly must be one of the values from the DataType enum in TensorProto whose values correspond to T2. The default value is 1 = FLOAT. Default value is name: "output_datatype" i: 1 type: INT

  • periodic: If 1, returns a window to be used as a periodic function. If 0, returns a symmetric window. When ‘periodic’ is specified, the operator computes a window of length size + 1 and returns the first size points. The default value is 1. Default value is name: "periodic" i: 1 type: INT
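For reference, the window coefficients can be sketched in NumPy as follows (standard Blackman coefficients a0=0.42, a1=0.5, a2=0.08; the periodic variant divides by size instead of size - 1):

import numpy as np

def blackman_window(size, periodic=True, dtype=np.float32):
    n = np.arange(size)
    N = size if periodic else size - 1
    w = (0.42
         - 0.50 * np.cos(2.0 * np.pi * n / N)
         + 0.08 * np.cos(4.0 * np.pi * n / N))
    return w.astype(dtype)

print(blackman_window(8))                  # periodic window of length 8
print(np.blackman(8).astype(np.float32))   # symmetric variant for comparison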

Inputs

+
    +
  • size (heterogeneous)T1: A scalar value indicating the length of the window.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: A Blackman window with length: size. The output has the shape: [size].

  • +
+

Type Constraints

+
    +
  • T1 tensor(int32), tensor(int64): Constrain the input size to int64_t.

  • +
  • T2 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain output types to numeric tensors.


OnnxBlackmanWindow_17#

class skl2onnx.algebra.onnx_ops.OnnxBlackmanWindow_17(*args, **kwargs)#

Version

Onnx name: BlackmanWindow

This version of the operator has been available since version 17.

+

Summary

+

Generates a Blackman window as described in the paper https://ieeexplore.ieee.org/document/1455106.

+

Attributes

+
    +
  • output_datatype: The data type of the output tensor. Strictly must be one of the values from DataType enum in TensorProto whose values correspond to T2. The default value is 1 = FLOAT. Default value is +name: "output_datatype" i: 1 type: INT

  • +
  • periodic: If 1, returns a window to be used as periodic function. If 0, return a symmetric window. When ‘periodic’ is specified, hann computes a window of length size + 1 and returns the first size points. The default value is 1. Default value is +name: "periodic" i: 1 type: INT

  • +
+

Inputs

+
    +
  • size (heterogeneous)T1: A scalar value indicating the length of the window.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: A Blackman window with length: size. The output has the shape: [size].

  • +
+

Type Constraints

+
    +
  • T1 tensor(int32), tensor(int64): Constrain the input size to int64_t.

  • +
  • T2 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain output types to numeric tensors.


OnnxCast#

class skl2onnx.algebra.onnx_ops.OnnxCast(*args, **kwargs)#

Version

Onnx name: Cast

This version of the operator has been available since version 13.

+

Summary

+

The operator casts the elements of a given input tensor to a data type specified by the ‘to’ argument and returns an output tensor of the same size in the converted type. The ‘to’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message.

+

Casting from string tensors in plain (e.g., “3.14” and “1000”) and scientific numeric representations (e.g., “1e-5” and “1E8”) to float types is supported. For example, converting the string “100.5” to an integer may yield 100. There are some string literals reserved for special floating-point values; “+INF” (and “INF”), “-INF”, and “NaN” are positive infinity, negative infinity, and not-a-number, respectively. Any string which can exactly match “+INF” in a case-insensitive way would be mapped to positive infinity. Similarly, this case-insensitive rule is applied to “INF” and “NaN”. When casting from numeric tensors to string tensors, a plain floating-point representation (such as “314.15926”) would be used. Converting a non-numerical-literal string such as “Hello World!” is undefined behavior. Converting a string representing a floating-point value, such as “2.718”, to INT is also undefined behavior.

+

Conversion from a numerical type to any numerical type is always allowed. The user must be aware of precision loss and value changes caused by the range difference between the two types. For example, a 64-bit float 3.1415926459 may be rounded to a 32-bit float 3.141592. Similarly, converting an integer 36 to Boolean may produce 1 because we truncate bits which can’t be stored in the targeted type.

+

In more detail, the conversion among numerical types should follow these rules:

  • Casting from floating point to:
    • floating point: +/- infinity if OOR (out of range).
    • fixed point: undefined if OOR.
    • bool: +/- 0.0 to False; all else to True.

  • Casting from fixed point to:
    • floating point: +/- infinity if OOR. (+ infinity in the case of uint)
    • fixed point: when OOR, discard higher bits and reinterpret (with respect to two’s complement representation for signed types). For example, 200 (int16) -> -56 (int8).
    • bool: zero to False; nonzero to True.

  • Casting from bool to:
    • floating point: {1.0, 0.0}.
    • fixed point: {1, 0}.
    • bool: no change.
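As a concrete illustration, a standalone Cast node can be built and executed with the algebra API. This is a minimal sketch assuming recent onnx and onnxruntime installs; the pattern follows the usual skl2onnx examples rather than anything specific to this page:

import numpy as np
from onnx import TensorProto
from skl2onnx.algebra.onnx_ops import OnnxCast
from onnxruntime import InferenceSession

# Cast an int64 input to float32, following the rules above.
node = OnnxCast('X', to=TensorProto.FLOAT, op_version=13, output_names=['Y'])
x = np.array([36, -1, 200], dtype=np.int64)
onx = node.to_onnx({'X': x}, target_opset=13)

sess = InferenceSession(onx.SerializeToString(),
                        providers=["CPUExecutionProvider"])
print(sess.run(None, {'X': x})[0])   # [ 36.  -1. 200.] as float32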

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T1: Input tensor to be cast.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: Output tensor with the same shape as input with type specified by the ‘to’ argument

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain input types. Casting from complex is not supported.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain output types. Casting to complex is not supported.


OnnxCastLike#

class skl2onnx.algebra.onnx_ops.OnnxCastLike(*args, **kwargs)#

Version

Onnx name: CastLike

This version of the operator has been available since version 15.

+

Summary

+

The operator casts the elements of a given input tensor (the first input) to the same data type as the elements of the second input tensor. See the documentation of the Cast operator for further details.

+

Inputs

+
    +
  • input (heterogeneous)T1: Input tensor to be cast.

  • +
  • target_type (heterogeneous)T2: The (first) input tensor will be cast to produce a tensor of the same type as this (second input) tensor.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: Output tensor produced by casting the first input tensor to have the same type as the second input tensor.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain input types. Casting from complex is not supported.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain output types. Casting to complex is not supported.


OnnxCastLike_15#

class skl2onnx.algebra.onnx_ops.OnnxCastLike_15(*args, **kwargs)#

Version

Onnx name: CastLike

This version of the operator has been available since version 15.

+

Summary

+

The operator casts the elements of a given input tensor (the first input) to +the same data type as the elements of the second input tensor. +See documentation of the Cast operator for further details.

+

Inputs

+
    +
  • input (heterogeneous)T1: Input tensor to be cast.

  • +
  • target_type (heterogeneous)T2: The (first) input tensor will be cast to produce a tensor of the same type as this (second input) tensor.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: Output tensor produced by casting the first input tensor to have the same type as the second input tensor.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain input types. Casting from complex is not supported.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain output types. Casting to complex is not supported.


OnnxCastMap#

class skl2onnx.algebra.onnx_ops.OnnxCastMap(*args, **kwargs)#

Version

Onnx name: CastMap

This version of the operator has been available since version 1 of domain ai.onnx.ml.

+

Summary

+

Converts a map to a tensor. The map key must be an int64 and the values will be ordered in ascending order based on this key. The operator supports dense packing or sparse packing. If using sparse packing, the key cannot exceed the max_map-1 value.

+
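To make the packing modes concrete, here is an illustrative sketch (plain Python, not the runtime) of what DENSE and SPARSE packing produce for a small map:

# Input map: int64 keys -> float values.
m = {0: 1.5, 2: 3.0, 5: 7.0}

# DENSE: output only the values present, ordered by ascending key.
dense = [m[k] for k in sorted(m)]                 # [1.5, 3.0, 7.0]

# SPARSE: each key becomes the index in an output of length max_map
# (missing positions are filled with 0); keys must be < max_map.
max_map = 8
sparse = [m.get(k, 0.0) for k in range(max_map)]  # [1.5, 0.0, 3.0, 0.0, 0.0, 7.0, 0.0, 0.0]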

Attributes

  • cast_to: A string indicating the desired element type of the output tensor, one of ‘TO_FLOAT’, ‘TO_STRING’, ‘TO_INT64’. Default value is name: "cast_to" s: "TO_FLOAT" type: STRING

  • map_form: Indicates whether to only output as many values as are in the input (dense), or position the input based on using the key of the map as the index of the output (sparse). One of ‘DENSE’, ‘SPARSE’. Default value is name: "map_form" s: "DENSE" type: STRING

  • max_map: If the value of map_form is ‘SPARSE’, this attribute indicates the total length of the output tensor. Default value is name: "max_map" i: 1 type: INT

Inputs

+
    +
  • X (heterogeneous)T1: The input map that is to be cast to a tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: A tensor representing the same data as the input map, ordered by their keys

  • +
+

Type Constraints

+
    +
  • T1 map(int64, string), map(int64, float): The input must be an integer map to either string or float.

  • +
  • T2 tensor(string), tensor(float), tensor(int64): The output is a 1-D tensor of string, float, or integer.


OnnxCastMap_1#

class skl2onnx.algebra.onnx_ops.OnnxCastMap_1(*args, **kwargs)#

Version

Onnx name: CastMap

This version of the operator has been available since version 1 of domain ai.onnx.ml.

+

Summary

+

Converts a map to a tensor. +The map key must be an int64 and the values will be ordered +in ascending order based on this key. +The operator supports dense packing or sparse packing. +If using sparse packing, the key cannot exceed the max_map-1 value.

+

Attributes

+
    +
  • cast_to: A string indicating the desired element type of the output tensor, one of ‘TO_FLOAT’, ‘TO_STRING’, ‘TO_INT64’. Default value is +name: "cast_to" s: "TO_FLOAT" type: STRING

  • +
  • map_form: Indicates whether to only output as many values as are in the input (dense), or position the input based on using the key of the map as the index of the output (sparse).<br>One of ‘DENSE’, ‘SPARSE’. Default value is +name: "map_form" s: "DENSE" type: STRING

  • +
  • max_map: If the value of map_form is ‘SPARSE,’ this attribute indicates the total length of the output tensor. Default value is +name: "max_map" i: 1 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: The input map that is to be cast to a tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: A tensor representing the same data as the input map, ordered by their keys

  • +
+

Type Constraints

+
    +
  • T1 map(int64, string), map(int64, float): The input must be an integer map to either string or float.

  • +
  • T2 tensor(string), tensor(float), tensor(int64): The output is a 1-D tensor of string, float, or integer.


OnnxCast_1#

class skl2onnx.algebra.onnx_ops.OnnxCast_1(*args, **kwargs)#

Version

Onnx name: Cast

This version of the operator has been available since version 1.

+

Summary

+

The operator casts the elements of a given input tensor to a data type specified by the ‘to’ argument and returns an output tensor of the same size in the converted type. The ‘to’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message. NOTE: Casting to and from strings is not supported yet.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T1: Input tensor to be cast.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: Output tensor with the same shape as input with type specified by the ‘to’ argument

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain input types. Casting from strings and complex are not supported.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain output types. Casting to strings and complex are not supported.


OnnxCast_13#

class skl2onnx.algebra.onnx_ops.OnnxCast_13(*args, **kwargs)#

Version

Onnx name: Cast

This version of the operator has been available since version 13.

+

Summary

+

The operator casts the elements of a given input tensor to a data type +specified by the ‘to’ argument and returns an output tensor of the same size in +the converted type. The ‘to’ argument must be one of the data types specified +in the ‘DataType’ enum field in the TensorProto message.

+

Casting from string tensor in plain (e.g., “3.14” and “1000”) and scientific numeric representations +(e.g., “1e-5” and “1E8”) to float types is supported. For example, converting string “100.5” to an integer may +result 100. There are some string literals reserved for special floating-point values; +“+INF” (and “INF”), “-INF”, and “NaN” are positive infinity, negative infinity, and not-a-number, respectively. +Any string which can exactly match “+INF” in a case-insensitive way would be mapped to positive infinite. Similarly, +this case-insensitive rule is applied to “INF” and “NaN”. When casting from numeric tensors +to string tensors, plain floating-point representation (such as “314.15926”) would be used. +Converting non-numerical-literal string such as “Hello World!” is an undefined behavior. Cases +of converting string representing floating-point arithmetic value, such as “2.718”, to INT is an undefined behavior.

+

Conversion from a numerical type to any numerical type is always allowed. +User must be aware of precision loss and value change caused by range difference between two types. +For example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting +an integer 36 to Boolean may produce 1 because we truncate bits which can’t be stored in the targeted type.

+

In more detail, the conversion among numerical types should follow these rules:

  • Casting from floating point to:
    • floating point: +/- infinity if OOR (out of range).
    • fixed point: undefined if OOR.
    • bool: +/- 0.0 to False; all else to True.

  • Casting from fixed point to:
    • floating point: +/- infinity if OOR. (+ infinity in the case of uint)
    • fixed point: when OOR, discard higher bits and reinterpret (with respect to two’s complement representation for signed types). For example, 200 (int16) -> -56 (int8).
    • bool: zero to False; nonzero to True.

  • Casting from bool to:
    • floating point: {1.0, 0.0}.
    • fixed point: {1, 0}.
    • bool: no change.

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T1: Input tensor to be cast.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: Output tensor with the same shape as input with type specified by the ‘to’ argument

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain input types. Casting from complex is not supported.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16): Constrain output types. Casting to complex is not supported.

  • +
+

OnnxCast_6#

class skl2onnx.algebra.onnx_ops.OnnxCast_6(*args, **kwargs)#

Version

Onnx name: Cast

This version of the operator has been available since version 6.

Summary

The operator casts the elements of a given input tensor to a data type specified by the ‘to’ argument and returns an output tensor of the same size in the converted type. The ‘to’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message.

NOTE: Casting to and from strings is not supported yet.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T1: Input tensor to be cast.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: Output tensor with the same shape as input with type specified by the ‘to’ argument

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain input types. Casting from strings and complex are not supported.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain output types. Casting to strings and complex are not supported.

  • +
+
+ +
+
+
+
+

OnnxCast_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCast_9(*args, **kwargs)#
+

Version

+

Onnx name: Cast

+

This version of the operator has been available since +version 9.

+

Summary

+

The operator casts the elements of a given input tensor to a data type +specified by the ‘to’ argument and returns an output tensor of the same size in +the converted type. The ‘to’ argument must be one of the data types specified +in the ‘DataType’ enum field in the TensorProto message.

+

Casting from string tensor in plain (e.g., “3.14” and “1000”) and scientific numeric representations +(e.g., “1e-5” and “1E8”) to float types is supported. For example, converting string “100.5” to an integer may +result 100. There are some string literals reserved for special floating-point values; +“+INF” (and “INF”), “-INF”, and “NaN” are positive infinity, negative infinity, and not-a-number, respectively. +Any string which can exactly match “+INF” in a case-insensitive way would be mapped to positive infinite. Similarly, +this case-insensitive rule is applied to “INF” and “NaN”. When casting from numeric tensors +to string tensors, plain floating-point representation (such as “314.15926”) would be used. +Converting non-numerical-literal string such as “Hello World!” is an undefined behavior. Cases +of converting string representing floating-point arithmetic value, such as “2.718”, to INT is an undefined behavior.

+

Conversion from a numerical type to any numerical type is always allowed. +User must be aware of precision loss and value change caused by range difference between two types. +For example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting +an integer 36 to Boolean may produce 1 because we truncate bits which can’t be stored in the targeted type.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T1: Input tensor to be cast.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: Output tensor with the same shape as input with type specified by the ‘to’ argument

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string): Constrain input types. Casting from complex is not supported.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string): Constrain output types. Casting to complex is not supported.

  • +
+
+ +
+
+
+
+

OnnxCategoryMapper#

class skl2onnx.algebra.onnx_ops.OnnxCategoryMapper(*args, **kwargs)#

Version

Onnx name: CategoryMapper

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Converts strings to integers and vice versa.

Two sequences of equal length are used to map between integers and strings, with the strings and integers at the same index defining the mapping.

Each operator instance converts either integers to strings or strings to integers, depending on which default value attribute is provided. Only one default value attribute should be defined.

If the string default value is set, it will convert integers to strings. If the int default value is set, it will convert strings to integers.

Attributes

  • default_int64: An integer to use when an input string value is not found in the map. One and only one of the ‘default_*’ attributes must be defined. Default value is name: "default_int64" i: -1 type: INT

  • default_string: A string to use when an input integer value is not found in the map. One and only one of the ‘default_*’ attributes must be defined. Default value is name: "default_string" s: "_Unused" type: STRING
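The lookup performed by the operator amounts to a dictionary lookup with a fallback default. The pure-Python sketch below mirrors that behaviour; the attribute names cats_strings and cats_int64s are assumed from the standard CategoryMapper definition, and the sample categories are invented for the example.

    # Hypothetical mapping: the two sequences align index by index.
    cats_strings = ["cat", "dog", "bird"]
    cats_int64s = [0, 1, 2]
    default_int64 = -1  # returned when an input string is not in the map

    str_to_int = dict(zip(cats_strings, cats_int64s))

    def category_mapper(values):
        """String -> int64 direction (the int default value is set)."""
        return [str_to_int.get(v, default_int64) for v in values]

    print(category_mapper(["dog", "fish", "cat"]))  # [1, -1, 0]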

Inputs

+
    +
  • X (heterogeneous)T1: Input data

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: Output data. If strings are input, the output values are integers, and vice versa.

  • +
+

Type Constraints

+
    +
  • T1 tensor(string), tensor(int64): The input must be a tensor of strings or integers, either [N,C] or [C].

  • +
  • T2 tensor(string), tensor(int64): The output is a tensor of strings or integers. Its shape will be the same as the input shape.

  • +
+
+ +
+
+
+
+

OnnxCategoryMapper_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCategoryMapper_1(*args, **kwargs)#
+

Version

+

Onnx name: CategoryMapper

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Converts strings to integers and vice versa.

+

Two sequences of equal length are used to map between integers and strings, +with strings and integers at the same index detailing the mapping.

+

Each operator converts either integers to strings or strings to integers, depending +on which default value attribute is provided. Only one default value attribute +should be defined.

+

If the string default value is set, it will convert integers to strings. +If the int default value is set, it will convert strings to integers.

+

Attributes

+
    +
  • +
  • +
  • default_int64: An integer to use when an input string value is not found in the map.<br>One and only one of the ‘default_*’ attributes must be defined. Default value is +name: "default_int64" i: -1 type: INT

  • +
  • default_string: A string to use when an input integer value is not found in the map.<br>One and only one of the ‘default_*’ attributes must be defined. Default value is +name: "default_string" s: "_Unused" type: STRING

  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: Input data

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: Output data. If strings are input, the output values are integers, and vice versa.

  • +
+

Type Constraints

+
    +
  • T1 tensor(string), tensor(int64): The input must be a tensor of strings or integers, either [N,C] or [C].

  • +
  • T2 tensor(string), tensor(int64): The output is a tensor of strings or integers. Its shape will be the same as the input shape.

  • +
+
+ +
+
+
+
+

OnnxCeil#

class skl2onnx.algebra.onnx_ops.OnnxCeil(*args, **kwargs)#

Version

Onnx name: Ceil

This version of the operator has been available since version 13.

Summary

Ceil takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the ceil function, y = ceil(x), is applied to the tensor elementwise.
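A minimal sketch of this elementwise behaviour, assuming the usual skl2onnx algebra workflow; the input name and sample values are arbitrary, and the result can be checked against numpy.ceil.

    import numpy as np
    import onnxruntime as rt
    from skl2onnx.algebra.onnx_ops import OnnxCeil

    x = np.array([[-1.5, 0.2, 2.0]], dtype=np.float32)
    node = OnnxCeil('X', op_version=13, output_names=['Y'])
    model = node.to_onnx({'X': x})

    sess = rt.InferenceSession(model.SerializeToString(),
                               providers=['CPUExecutionProvider'])
    y = sess.run(None, {'X': x})[0]
    assert np.array_equal(y, np.ceil(x))  # [[-1.  1.  2.]]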

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxCeil_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCeil_1(*args, **kwargs)#
+

Version

+

Onnx name: Ceil

+

This version of the operator has been available since +version 1.

+

Summary

+

Ceil takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the ceil is, y = ceil(x), is applied to +the tensor elementwise.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxCeil_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCeil_13(*args, **kwargs)#
+

Version

+

Onnx name: Ceil

+

This version of the operator has been available since +version 13.

+

Summary

+

Ceil takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the ceil is, y = ceil(x), is applied to +the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxCeil_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCeil_6(*args, **kwargs)#
+

Version

+

Onnx name: Ceil

+

This version of the operator has been available since +version 6.

+

Summary

+

Ceil takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the ceil is, y = ceil(x), is applied to +the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxCelu#

class skl2onnx.algebra.onnx_ops.OnnxCelu(*args, **kwargs)#

Version

Onnx name: Celu

This version of the operator has been available since version 12.

Summary

Continuously Differentiable Exponential Linear Units: perform the linear unit element-wise on the input tensor X using the formula:

max(0,x) + min(0,alpha*(exp(x/alpha)-1))
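The formula above translates directly into NumPy; this reference sketch (with arbitrarily chosen sample values) makes the role of alpha explicit.

    import numpy as np

    def celu(x, alpha=1.0):
        # max(0, x) + min(0, alpha * (exp(x / alpha) - 1))
        return np.maximum(0.0, x) + np.minimum(0.0, alpha * (np.exp(x / alpha) - 1.0))

    x = np.array([-2.0, -0.5, 0.0, 1.5], dtype=np.float32)
    print(celu(x))           # smooth for x < 0, identity for x >= 0
    print(celu(x, alpha=2))  # a larger alpha gives a gentler negative saturation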

Attributes

+
    +
  • alpha: The Alpha value in the Celu formula, which controls the shape of the unit. The default value is 1.0. Default value is name: "alpha" f: 1.0 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float): Constrain input and output types to float32 tensors.

  • +
+
+ +
+
+
+
+

OnnxCelu_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCelu_12(*args, **kwargs)#
+

Version

+

Onnx name: Celu

+

This version of the operator has been available since +version 12.

+

Summary

+

Continuously Differentiable Exponential Linear Units: +Perform the linear unit element-wise on the input tensor X +using formula:

+
max(0,x) + min(0,alpha*(exp(x/alpha)-1))
+
+
+

Attributes

+
    +
  • alpha: The Alpha value in Celu formula which control the shape of the unit. The default value is 1.0. Default value is +name: "alpha" f: 1.0 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float): Constrain input and output types to float32 tensors.

  • +
+
+ +
+
+
+
+

OnnxCenterCropPad#

class skl2onnx.algebra.onnx_ops.OnnxCenterCropPad(*args, **kwargs)#

Version

Onnx name: CenterCropPad

This version of the operator has been available since version 18.

Summary

Center crop or pad an input to given dimensions.

The crop/pad dimensions can be specified for a subset of the axes. Non-specified dimensions will not be cropped or padded.

If the input dimensions are bigger than the crop shape, a centered cropping window is extracted from the input. If the input dimensions are smaller than the crop shape, the input is padded on each side equally, so that the input is centered in the output.
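A NumPy sketch of the per-axis rule (crop when the input is larger, pad equally when it is smaller); the shapes are made up for the example, only the 2-D case is shown, and the placement of an odd extra pad may differ from a given runtime.

    import numpy as np

    def center_crop_pad_axis(x, target, axis):
        size = x.shape[axis]
        if size >= target:                      # centered crop
            start = (size - target) // 2
            sl = [slice(None)] * x.ndim
            sl[axis] = slice(start, start + target)
            return x[tuple(sl)]
        pad_total = target - size               # pad equally on both sides
        before = pad_total // 2
        pads = [(0, 0)] * x.ndim
        pads[axis] = (before, pad_total - before)
        return np.pad(x, pads)

    img = np.arange(20, dtype=np.float32).reshape(4, 5)
    out = center_crop_pad_axis(center_crop_pad_axis(img, 2, axis=0), 8, axis=1)
    print(out.shape)  # (2, 8): cropped on axis 0, padded on axis 1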

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input_data (heterogeneous)T: Input to extract the centered crop from.

  • +
  • shape (heterogeneous)Tind: 1-D tensor representing the cropping window dimensions.

  • +
+

Outputs

+
    +
  • output_data (heterogeneous)T: Output data.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxCenterCropPad_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCenterCropPad_18(*args, **kwargs)#
+

Version

+

Onnx name: CenterCropPad

+

This version of the operator has been available since +version 18.

+

Summary

+

Center crop or pad an input to given dimensions.

+

The crop/pad dimensions can be specified for a subset of the axes. Non-specified dimensions will not be +cropped or padded.

+

If the input dimensions are bigger than the crop shape, a centered cropping window is extracted from the input. +If the input dimensions are smaller than the crop shape, the input is padded on each side equally, +so that the input is centered in the output.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input_data (heterogeneous)T: Input to extract the centered crop from.

  • +
  • shape (heterogeneous)Tind: 1-D tensor representing the cropping window dimensions.

  • +
+

Outputs

+
    +
  • output_data (heterogeneous)T: Output data.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxClip#

class skl2onnx.algebra.onnx_ops.OnnxClip(*args, **kwargs)#

Version

Onnx name: Clip

This version of the operator has been available since version 13.

Summary

Clip operator limits the given input within an interval. The interval is specified by the inputs ‘min’ and ‘max’. They default to numeric_limits::lowest() and numeric_limits::max(), respectively.
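A minimal sketch of Clip with ‘min’ and ‘max’ passed as constant inputs, assuming the usual skl2onnx algebra workflow; the bounds and sample data are arbitrary, and the result can be checked against numpy.clip.

    import numpy as np
    import onnxruntime as rt
    from skl2onnx.algebra.onnx_ops import OnnxClip

    x = np.array([[-3.0, 0.5, 7.0]], dtype=np.float32)
    # Since opset 11, min and max are scalar inputs rather than attributes.
    node = OnnxClip('X', np.array(0.0, dtype=np.float32),
                    np.array(1.0, dtype=np.float32),
                    op_version=13, output_names=['Y'])
    model = node.to_onnx({'X': x})

    sess = rt.InferenceSession(model.SerializeToString(),
                               providers=['CPUExecutionProvider'])
    y = sess.run(None, {'X': x})[0]
    assert np.array_equal(y, np.clip(x, 0.0, 1.0))  # [[0.  0.5 1. ]]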

+

Inputs

+

Between 1 and 3 inputs.

+
    +
  • input (heterogeneous)T: Input tensor whose elements to be clipped

  • +
  • min (optional, heterogeneous)T: Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape).

  • +
  • max (optional, heterogeneous)T: Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape).

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor with clipped input elements

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxClip_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxClip_1(*args, **kwargs)#
+

Version

+

Onnx name: Clip

+

This version of the operator has been available since +version 1.

+

Summary

+

Clip operator limits the given input within an interval. The interval is +specified with arguments ‘min’ and ‘max’. They default to +numeric_limits::lowest() and numeric_limits::max() respectively.

+

Attributes

+
    +
  • +
  • +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor whose elements to be clipped

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor with clipped input elements

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxClip_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxClip_11(*args, **kwargs)#
+

Version

+

Onnx name: Clip

+

This version of the operator has been available since +version 11.

+

Summary

+

Clip operator limits the given input within an interval. The interval is +specified by the inputs ‘min’ and ‘max’. They default to +numeric_limits::lowest() and numeric_limits::max(), respectively.

+

Inputs

+

Between 1 and 3 inputs.

+
    +
  • input (heterogeneous)T: Input tensor whose elements to be clipped

  • +
  • min (optional, heterogeneous)T: Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape).

  • +
  • max (optional, heterogeneous)T: Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape).

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor with clipped input elements

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxClip_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxClip_12(*args, **kwargs)#
+

Version

+

Onnx name: Clip

+

This version of the operator has been available since +version 12.

+

Summary

+

Clip operator limits the given input within an interval. The interval is +specified by the inputs ‘min’ and ‘max’. They default to +numeric_limits::lowest() and numeric_limits::max(), respectively.

+

Inputs

+

Between 1 and 3 inputs.

+
    +
  • input (heterogeneous)T: Input tensor whose elements to be clipped

  • +
  • min (optional, heterogeneous)T: Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape).

  • +
  • max (optional, heterogeneous)T: Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape).

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor with clipped input elements

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxClip_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxClip_13(*args, **kwargs)#
+

Version

+

Onnx name: Clip

+

This version of the operator has been available since +version 13.

+

Summary

+

Clip operator limits the given input within an interval. The interval is +specified by the inputs ‘min’ and ‘max’. They default to +numeric_limits::lowest() and numeric_limits::max(), respectively.

+

Inputs

+

Between 1 and 3 inputs.

+
    +
  • input (heterogeneous)T: Input tensor whose elements to be clipped

  • +
  • min (optional, heterogeneous)T: Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape).

  • +
  • max (optional, heterogeneous)T: Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape).

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor with clipped input elements

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxClip_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxClip_6(*args, **kwargs)#
+

Version

+

Onnx name: Clip

+

This version of the operator has been available since +version 6.

+

Summary

+

Clip operator limits the given input within an interval. The interval is +specified with arguments ‘min’ and ‘max’. They default to +numeric_limits::lowest() and numeric_limits::max() respectively.

+

Attributes

+
    +
  • max: Maximum value, above which element is replaced by max Default value is +name: "max" f: 3.4028234663852886e+38 type: FLOAT

  • +
  • min: Minimum value, under which element is replaced by min Default value is +name: "min" f: -3.4028234663852886e+38 type: FLOAT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor whose elements to be clipped

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor with clipped input elements

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxCol2Im#

class skl2onnx.algebra.onnx_ops.OnnxCol2Im(*args, **kwargs)#

Version

Onnx name: Col2Im

This version of the operator has been available since version 18.

Summary

The operator rearranges column blocks back into a multidimensional image.

Col2Im behaves similarly to PyTorch’s fold https://pytorch.org/docs/stable/generated/torch.nn.Fold.html, but it only supports batched multi-dimensional image tensors. Another implementation in Python with N-dimension support can be found at https://github.com/f-dangel/unfoldNd/.

NOTE:

Although specifying image_shape looks redundant because it could be calculated from convolution formulas, it is required as input for more advanced scenarios as explained at PyTorch’s implementation (https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Col2Im.cpp#L10)
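The block count L mentioned in the input description below can be worked out directly from the image and block shapes; the sketch reproduces the 10x20 image / 9x18 block example used there (2*3 = 6 blocks), assuming unit strides and dilations and an arbitrary channel count.

    import numpy as np

    image_shape = np.array([10, 20])   # [H_img, W_img]
    block_shape = np.array([9, 18])    # [H_block, W_block]

    # With unit strides/dilations there is one block per valid sliding position.
    blocks_per_axis = image_shape - block_shape + 1      # [2, 3]
    L = int(np.prod(blocks_per_axis))                    # 6 blocks in total

    N, C = 1, 3
    col_input_shape = (N, C * int(np.prod(block_shape)), L)  # (1, 486, 6)
    output_shape = (N, C, *image_shape)                       # (1, 3, 10, 20)
    print(col_input_shape, output_shape)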

+
+
+

Attributes

+
    +
  • +
  • +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input data tensor to be rearranged from column blocks back into an image. This is a 3-dimensional tensor containing [N, C * n-ary-product(block_shape), L], where N is batch dimension, C is image channel dimension and L is number of blocks.The blocks are enumerated in increasing lexicographic-order of their indices.For example, with an image-size 10*20 and block-size 9*18, there would be 2*3 blocks, enumerated in the order block(0, 0), block(0, 1), block(0, 2), block(1, 0), block(1, 1), block(1, 2).

  • +
  • image_shape (heterogeneous)tensor(int64): The shape of the spatial dimensions of the image after rearranging the column blocks.This is a 1-dimensional tensor with size of at least 2, containing the value [H_img, W_img] for a 2-D image or [dim_i1, dim_i2, …, dim_iN] for a N-D image.

  • +
  • block_shape (heterogeneous)tensor(int64): The shape of the block to apply on the input.This is a 1-dimensional tensor of size of at least 2, containing the value [H_block, W_block] for a 2-D image or [dim_b1, dim_b2, …, dim_bN] for a N-D block.This is the block-shape before dilation is applied to it.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor produced by rearranging blocks into an image.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all numeric tensor types.

  • +
+
+ +
+
+
+
+

OnnxCol2Im_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCol2Im_18(*args, **kwargs)#
+

Version

+

Onnx name: Col2Im

+

This version of the operator has been available since +version 18.

+

Summary

+

The operator rearranges column blocks back into a multidimensional image

+

Col2Im behaves similarly to PyTorch’s fold https://pytorch.org/docs/stable/generated/torch.nn.Fold.html, +but it only supports batched multi-dimensional image tensors. +Another implementation in Python with N-dimension support can be found at https://github.com/f-dangel/unfoldNd/.

+
+
NOTE:

Although specifying image_shape looks redundant because it could be calculated from +convolution formulas, it is required as input for more advanced scenarios as explained +at PyTorch’s implementation (https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Col2Im.cpp#L10)

+
+
+

Attributes

+
    +
  • +
  • +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input data tensor to be rearranged from column blocks back into an image. This is a 3-dimensional tensor containing [N, C * n-ary-product(block_shape), L], where N is batch dimension, C is image channel dimension and L is number of blocks.The blocks are enumerated in increasing lexicographic-order of their indices.For example, with an image-size 10*20 and block-size 9*18, there would be 2*3 blocks, enumerated in the order block(0, 0), block(0, 1), block(0, 2), block(1, 0), block(1, 1), block(1, 2).

  • +
  • image_shape (heterogeneous)tensor(int64): The shape of the spatial dimensions of the image after rearranging the column blocks.This is a 1-dimensional tensor with size of at least 2, containing the value [H_img, W_img] for a 2-D image or [dim_i1, dim_i2, …, dim_iN] for a N-D image.

  • +
  • block_shape (heterogeneous)tensor(int64): The shape of the block to apply on the input.This is a 1-dimensional tensor of size of at least 2, containing the value [H_block, W_block] for a 2-D image or [dim_b1, dim_b2, …, dim_bN] for a N-D block.This is the block-shape before dilation is applied to it.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor produced by rearranging blocks into an image.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all numeric tensor types.

  • +
+
+ +
+
+
+
+

OnnxCompress#

class skl2onnx.algebra.onnx_ops.OnnxCompress(*args, **kwargs)#

Version

Onnx name: Compress

This version of the operator has been available since version 11.

Summary

Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index. In case axis is not provided, input is flattened before elements are selected. Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html
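Since the operator mirrors numpy.compress, the expected result can be previewed directly in NumPy; the axis, condition and data below are arbitrary.

    import numpy as np

    data = np.arange(12).reshape(3, 4)
    condition = np.array([True, False, True])

    # Keep rows 0 and 2 along axis 0.
    print(np.compress(condition, data, axis=0))
    # Without an axis the input is flattened first, as the ONNX operator does;
    # a condition shorter than the data simply discards the trailing elements.
    print(np.compress([True, False, True, True], data))  # [0 2 3]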

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • condition (heterogeneous)T1: Rank 1 tensor of booleans to indicate which slices or data elements to be selected. Its length can be less than the input length along the axis or the flattened input size if axis is not specified. In such cases data slices or elements exceeding the condition length are discarded.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r if axis is specified. Otherwise output is a Tensor of rank 1.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • T1 tensor(bool): Constrain to boolean tensors.

  • +
+
+ +
+
+
+
+

OnnxCompress_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCompress_11(*args, **kwargs)#
+

Version

+

Onnx name: Compress

+

This version of the operator has been available since +version 11.

+

Summary

+

Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index. +In case axis is not provided, input is flattened before elements are selected. +Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • condition (heterogeneous)T1: Rank 1 tensor of booleans to indicate which slices or data elements to be selected. Its length can be less than the input length along the axis or the flattened input size if axis is not specified. In such cases data slices or elements exceeding the condition length are discarded.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r if axis is specified. Otherwise output is a Tensor of rank 1.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • T1 tensor(bool): Constrain to boolean tensors.

  • +
+
+ +
+
+
+
+

OnnxCompress_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCompress_9(*args, **kwargs)#
+

Version

+

Onnx name: Compress

+

This version of the operator has been available since +version 9.

+

Summary

+

Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index. +In case axis is not provided, input is flattened before elements are selected. +Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • condition (heterogeneous)T1: Rank 1 tensor of booleans to indicate which slices or data elements to be selected. Its length can be less than the input length alone the axis or the flattened input size if axis is not specified. In such cases data slices or elements exceeding the condition length are discarded.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r if axis is specified. Otherwise output is a Tensor of rank 1.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • T1 tensor(bool): Constrain to boolean tensors.

  • +
+
+ +
+
+
+
+

OnnxConcat#

class skl2onnx.algebra.onnx_ops.OnnxConcat(*args, **kwargs)#

Version

Onnx name: Concat

This version of the operator has been available since version 13.

+

Summary

+

Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on.
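A minimal sketch concatenating two inputs along axis 0 with the skl2onnx algebra API; the names, the axis value and the sample arrays are arbitrary.

    import numpy as np
    import onnxruntime as rt
    from skl2onnx.algebra.onnx_ops import OnnxConcat

    x = np.array([[1.0, 2.0]], dtype=np.float32)
    y = np.array([[3.0, 4.0], [5.0, 6.0]], dtype=np.float32)

    # Shapes may differ only on the concatenation axis (here axis=0).
    node = OnnxConcat('X', 'Y', axis=0, op_version=13, output_names=['Z'])
    model = node.to_onnx({'X': x, 'Y': y})

    sess = rt.InferenceSession(model.SerializeToString(),
                               providers=['CPUExecutionProvider'])
    print(sess.run(None, {'X': x, 'Y': y})[0])  # a 3x2 matrix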

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • inputs (variadic, heterogeneous)T: List of tensors for concatenation

  • +
+

Outputs

+
    +
  • concat_result (heterogeneous)T: Concatenated tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxConcatFromSequence#

class skl2onnx.algebra.onnx_ops.OnnxConcatFromSequence(*args, **kwargs)#

Version

Onnx name: ConcatFromSequence

This version of the operator has been available since version 11.

Summary

Concatenate a sequence of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on. By default ‘new_axis’ is 0, the behavior is similar to numpy.concatenate. When ‘new_axis’ is 1, the behavior is similar to numpy.stack.
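The effect of ‘new_axis’ can be previewed with the NumPy functions the summary references; the three sample tensors are arbitrary.

    import numpy as np

    seq = [np.full((2, 3), i, dtype=np.float32) for i in range(3)]

    # new_axis = 0: behaves like numpy.concatenate along the given axis.
    print(np.concatenate(seq, axis=0).shape)  # (6, 3)
    # new_axis = 1: behaves like numpy.stack, inserting a new axis.
    print(np.stack(seq, axis=0).shape)        # (3, 2, 3)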

+

Attributes

+
    +
  • +
  • new_axis: Insert and concatenate on a new axis or not, default 0 means do not insert new axis. Default value is +name: "new_axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • input_sequence (heterogeneous)S: Sequence of tensors for concatenation

  • +
+

Outputs

+
    +
  • concat_result (heterogeneous)T: Concatenated tensor

  • +
+

Type Constraints

+
    +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input types to any tensor type.

  • +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxConcatFromSequence_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConcatFromSequence_11(*args, **kwargs)#
+

Version

+

Onnx name: ConcatFromSequence

+

This version of the operator has been available since +version 11.

+

Summary

+

Concatenate a sequence of tensors into a single tensor. +All input tensors must have the same shape, except for the dimension size of the axis to concatenate on. +By default ‘new_axis’ is 0, the behavior is similar to numpy.concatenate. +When ‘new_axis’ is 1, the behavior is similar to numpy.stack.

+

Attributes

+
    +
  • +
  • new_axis: Insert and concatenate on a new axis or not, default 0 means do not insert new axis. Default value is +name: "new_axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • input_sequence (heterogeneous)S: Sequence of tensors for concatenation

  • +
+

Outputs

+
    +
  • concat_result (heterogeneous)T: Concatenated tensor

  • +
+

Type Constraints

+
    +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input types to any tensor type.

  • +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxConcat_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConcat_1(*args, **kwargs)#
+

Version

+

Onnx name: Concat

+

This version of the operator has been available since +version 1.

+

Summary

+

Concatenate a list of tensors into a single tensor

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • inputs (variadic, heterogeneous)T: List of tensors for concatenation

  • +
+

Outputs

+
    +
  • concat_result (heterogeneous)T: Concatenated tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxConcat_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConcat_11(*args, **kwargs)#
+

Version

+

Onnx name: Concat

+

This version of the operator has been available since +version 11.

+

Summary

+

Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • inputs (variadic, heterogeneous)T: List of tensors for concatenation

  • +
+

Outputs

+
    +
  • concat_result (heterogeneous)T: Concatenated tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxConcat_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConcat_13(*args, **kwargs)#
+

Version

+

Onnx name: Concat

+

This version of the operator has been available since +version 13.

+

Summary

+

Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • inputs (variadic, heterogeneous)T: List of tensors for concatenation

  • +
+

Outputs

+
    +
  • concat_result (heterogeneous)T: Concatenated tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxConcat_4#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConcat_4(*args, **kwargs)#
+

Version

+

Onnx name: Concat

+

This version of the operator has been available since +version 4.

+

Summary

+

Concatenate a list of tensors into a single tensor

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • inputs (variadic, heterogeneous)T: List of tensors for concatenation

  • +
+

Outputs

+
    +
  • concat_result (heterogeneous)T: Concatenated tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxConstant#

class skl2onnx.algebra.onnx_ops.OnnxConstant(*args, **kwargs)#

Version

Onnx name: Constant

This version of the operator has been available since version 13.

Summary

This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value, or value_* must be specified.

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor containing the same value of the provided tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxConstantOfShape#

class skl2onnx.algebra.onnx_ops.OnnxConstantOfShape(*args, **kwargs)#

Version

Onnx name: ConstantOfShape

This version of the operator has been available since version 9.

+

Summary

+

Generate a tensor with given value and shape.
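The semantics amount to numpy.full driven by a 1-D shape tensor; the sketch below shows both the default case (float32 zeros) and an explicit fill value, the two cases described under Outputs. The shape and fill value are arbitrary.

    import numpy as np

    shape = np.array([2, 3], dtype=np.int64)   # the single 'input' tensor

    # No 'value' attribute: float32 zeros of the requested shape.
    default_out = np.zeros(tuple(shape), dtype=np.float32)
    # With a 'value' attribute: that element's value and dtype fill the output.
    filled_out = np.full(tuple(shape), 7, dtype=np.int64)

    print(default_out.dtype, default_out.shape)  # float32 (2, 3)
    print(filled_out)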

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T1: 1D tensor. The shape of the expected output tensor. If empty tensor is given, the output would be a scalar. All values must be >= 0.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: Output tensor of shape specified by ‘input’.If attribute ‘value’ is specified, the value and datatype of the output tensor is taken from ‘value’.If attribute ‘value’ is not specified, the value in the output defaults to 0, and the datatype defaults to float32.

  • +
+

Type Constraints

+
    +
  • T1 tensor(int64): Constrain input types.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain output types to be numerics.

  • +
+
+ +
+
+
+
+

OnnxConstantOfShape_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConstantOfShape_9(*args, **kwargs)#
+

Version

+

Onnx name: ConstantOfShape

+

This version of the operator has been available since +version 9.

+

Summary

+

Generate a tensor with given value and shape.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T1: 1D tensor. The shape of the expected output tensor. If empty tensor is given, the output would be a scalar. All values must be >= 0.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: Output tensor of shape specified by ‘input’.If attribute ‘value’ is specified, the value and datatype of the output tensor is taken from ‘value’.If attribute ‘value’ is not specified, the value in the output defaults to 0, and the datatype defaults to float32.

  • +
+

Type Constraints

+
    +
  • T1 tensor(int64): Constrain input types.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain output types to be numerics.

  • +
+
+ +
+
+
+
+

OnnxConstant_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConstant_1(*args, **kwargs)#
+

Version

+

Onnx name: Constant

+

This version of the operator has been available since +version 1.

+

Summary

+

A constant tensor.

+

Attributes

+
    +
  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor containing the same value of the provided tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxConstant_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConstant_11(*args, **kwargs)#
+

Version

+

Onnx name: Constant

+

This version of the operator has been available since +version 11.

+

Summary

+

A constant tensor. Exactly one of the two attributes, either value or sparse_value, +must be specified.

+

Attributes

+
    +
  • +
  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor containing the same value of the provided tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxConstant_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConstant_12(*args, **kwargs)#
+

Version

+

Onnx name: Constant

+

This version of the operator has been available since +version 12.

+

Summary

+

This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value, +or value_* must be specified.

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor containing the same value of the provided tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxConstant_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConstant_13(*args, **kwargs)#
+

Version

+

Onnx name: Constant

+

This version of the operator has been available since +version 13.

+

Summary

+

This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value, +or value_* must be specified.

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor containing the same value of the provided tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxConstant_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConstant_9(*args, **kwargs)#
+

Version

+

Onnx name: Constant

+

This version of the operator has been available since +version 9.

+

Summary

+

A constant tensor.

+

Attributes

+
    +
  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor containing the same value of the provided tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxConv#

class skl2onnx.algebra.onnx_ops.OnnxConv(*args, **kwargs)#

Version

Onnx name: Conv

This version of the operator has been available since version 11.

Summary

The convolution operator consumes an input tensor and a filter, and computes the output.
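A minimal sketch of a single-channel 2-D convolution, assuming the usual skl2onnx algebra workflow; the tensor layouts follow the (N x C x H x W) convention described under Inputs below, and the kernel values are arbitrary.

    import numpy as np
    import onnxruntime as rt
    from skl2onnx.algebra.onnx_ops import OnnxConv

    x = np.random.rand(1, 1, 5, 5).astype(np.float32)   # N x C x H x W
    w = np.ones((1, 1, 3, 3), dtype=np.float32)          # M x C/group x kH x kW

    # The weight is passed as a constant second input (it becomes an initializer).
    node = OnnxConv('X', w, kernel_shape=[3, 3], pads=[0, 0, 0, 0],
                    op_version=11, output_names=['Y'])
    model = node.to_onnx({'X': x})

    sess = rt.InferenceSession(model.SerializeToString(),
                               providers=['CPUExecutionProvider'])
    print(sess.run(None, {'X': x})[0].shape)  # (1, 1, 3, 3)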

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • group: number of groups input channels and output channels are divided into. Default value is +name: "group" i: 1 type: INT

  • +
  • +
  • +
  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • X (heterogeneous)T: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
  • W (heterogeneous)T: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. Assuming zero based indices for the shape array, X.shape[1] == (W.shape[1] * group) == C and W.shape[0] mod G == 0. Or in other words FILTER_IN_CHANNEL multiplied by the number of groups should be equal to DATA_CHANNEL and the number of feature maps M should be a multiple of the number of groups G.

  • +
  • B (optional, heterogeneous)T: Optional 1D bias to be added to the convolution, has size of M.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxConvInteger#

class skl2onnx.algebra.onnx_ops.OnnxConvInteger(*args, **kwargs)#

Version

Onnx name: ConvInteger

This version of the operator has been available since version 10.

Summary

The integer convolution operator consumes an input tensor, its zero-point, a filter, and its zero-point, and computes the output. The production MUST never overflow. The accumulation may overflow if and only if in 32 bits.
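The role of the zero points can be seen in a tiny reference computation: each 8-bit value is shifted by its zero point before the int32 accumulation. The 1x1 spatial kernel and all values below are invented for the example.

    import numpy as np

    x = np.array([[[[10, 20, 30]]]], dtype=np.uint8)   # N x C x H x W
    w = np.array([[[[2]]]], dtype=np.uint8)             # M x C x kH x kW
    x_zero_point = np.uint8(5)
    w_zero_point = np.uint8(1)

    # Accumulate in int32 after removing the zero points (1x1 kernel, stride 1).
    y = (x.astype(np.int32) - int(x_zero_point)) * (w.astype(np.int32) - int(w_zero_point))
    print(y)  # [[[[ 5 15 25]]]], the int32 output of the "convolution"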

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • group: number of groups input channels and output channels are divided into. default is 1. Default value is +name: "group" i: 1 type: INT

  • +
  • +
  • +
  • +
+

Inputs

+

Between 2 and 4 inputs.

+
    +
  • x (heterogeneous)T1: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
  • w (heterogeneous)T2: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL.

  • +
  • x_zero_point (optional, heterogeneous)T1: Zero point tensor for input ‘x’. It’s optional and default value is 0. It’s a scalar, which means a per-tensor/layer quantization.

  • +
  • w_zero_point (optional, heterogeneous)T2: Zero point tensor for input ‘w’. It’s optional and default value is 0. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it’s a 1-D tensor, its number of elements should be equal to the number of output channels (M)

  • +
+

Outputs

+
    +
  • y (heterogeneous)T3: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

  • +
+

Type Constraints

+
    +
  • T1 tensor(int8), tensor(uint8): Constrain input x and its zero point data type to 8-bit integer tensor.

  • +
  • T2 tensor(int8), tensor(uint8): Constrain input w and its zero point data type to 8-bit integer tensor.

  • +
  • T3 tensor(int32): Constrain output y data type to 32-bit integer tensor.

  • +
+
+ +
+
+
+
+

OnnxConvInteger_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConvInteger_10(*args, **kwargs)#
+

Version

+

Onnx name: ConvInteger

+

This version of the operator has been available since +version 10.

+

Summary

+

The integer convolution operator consumes an input tensor, its zero-point, a filter, and its zero-point, +and computes the output. The production MUST never overflow. The accumulation may overflow if and only if in 32 bits.

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • group: number of groups input channels and output channels are divided into. default is 1. Default value is +name: "group" i: 1 type: INT

  • +
  • +
  • +
  • +
+

Inputs

+

Between 2 and 4 inputs.

+
    +
  • x (heterogeneous)T1: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
  • w (heterogeneous)T2: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL.

  • +
  • x_zero_point (optional, heterogeneous)T1: Zero point tensor for input ‘x’. It’s optional and default value is 0. It’s a scalar, which means a per-tensor/layer quantization.

  • +
  • w_zero_point (optional, heterogeneous)T2: Zero point tensor for input ‘w’. It’s optional and default value is 0. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it’s a 1-D tensor, its number of elements should be equal to the number of output channels (M)

  • +
+

Outputs

+
    +
  • y (heterogeneous)T3: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

  • +
+

Type Constraints

+
    +
  • T1 tensor(int8), tensor(uint8): Constrain input x and its zero point data type to 8-bit integer tensor.

  • +
  • T2 tensor(int8), tensor(uint8): Constrain input w and its zero point data type to 8-bit integer tensor.

  • +
  • T3 tensor(int32): Constrain output y data type to 32-bit integer tensor.

  • +
+
+ +
+
+
+
+

OnnxConvTranspose#

class skl2onnx.algebra.onnx_ops.OnnxConvTranspose(*args, **kwargs)#

Version

Onnx name: ConvTranspose

This version of the operator has been available since version 11.

Summary

The convolution transpose operator consumes an input tensor and a filter, and computes the output.

If the pads parameter is provided the shape of the output is calculated via the following equation:

output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i]

output_shape can also be explicitly specified in which case pads values are auto generated using these equations:

total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i]
If (auto_pads == SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2)
Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2).
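The two formulas are easy to check numerically; the sketch below evaluates them for an invented 1-D setting (stride 2, kernel 3, no dilation or output padding).

    # Worked example of the ConvTranspose shape formulas above (one spatial axis).
    stride, kernel, dilation, output_padding = 2, 3, 1, 0
    input_size = 4
    pads = (1, 1)  # pads[start_i], pads[end_i]

    output_size = (stride * (input_size - 1) + output_padding
                   + ((kernel - 1) * dilation + 1) - pads[0] - pads[1])
    print(output_size)  # 7

    # Inverse direction: request an explicit output size and derive the pads.
    wanted_output = 8
    total_padding = (stride * (input_size - 1) + output_padding
                     + ((kernel - 1) * dilation + 1) - wanted_output)
    start = total_padding // 2                      # SAME_UPPER split
    end = total_padding - start
    print(total_padding, start, end)  # 1 0 1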

+
+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = input_shape[i] * strides[i] for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • group: number of groups input channels and output channels are divided into. Default value is +name: "group" i: 1 type: INT


Inputs

+

Between 2 and 3 inputs.

+
    +
  • X (heterogeneous)T: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn)

  • +
  • W (heterogeneous)T: The weight tensor that will be used in the convolutions; has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x … x kn), where (k1 x k2 x … x kn) is the dimension of the kernel. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)

  • +
  • B (optional, heterogeneous)T: Optional 1D bias to be added to the convolution, has size of M.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, pad lengths and group count. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxConvTranspose_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConvTranspose_1(*args, **kwargs)#
+

Version

+

Onnx name: ConvTranspose

+

This version of the operator has been available since version 1.

+

Summary

+

The convolution transpose operator consumes an input tensor and a filter, and computes the output.

+

If the pads parameter is provided the shape of the output is calculated via the following equation:

+
+

output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i]

+
+

output_shape can also be explicitly specified, in which case the pads values are auto-generated using these equations:

+
+

total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i]
If (auto_pads != SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2)
Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2)

+
+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID, where the default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size matches the input. In case of an odd padding amount, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID means no padding. Default value is name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • group: number of groups input channels and output channels are divided into. Default value is +name: "group" i: 1 type: INT


Inputs

+

Between 2 and 3 inputs.

+
    +
  • X (heterogeneous)T: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn)

  • +
  • W (heterogeneous)T: The weight tensor that will be used in the convolutions; has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x … x kn), where (k1 x k2 x … x kn) is the dimension of the kernel. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)

  • +
  • B (optional, heterogeneous)T: Optional 1D bias to be added to the convolution, has size of M.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, pad lengths and group count. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxConvTranspose_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConvTranspose_11(*args, **kwargs)#
+

Version

+

Onnx name: ConvTranspose

+

This version of the operator has been available since version 11.

+

Summary

+

The convolution transpose operator consumes an input tensor and a filter, and computes the output.

+

If the pads parameter is provided the shape of the output is calculated via the following equation:

+
+

output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i]

+
+

output_shape can also be explicitly specified, in which case the pads values are auto-generated using these equations:

+
+

total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i]
If (auto_pads == SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2)
Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2)

+
+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = input_shape[i] * strides[i] for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • group: number of groups input channels and output channels are divided into. Default value is +name: "group" i: 1 type: INT


Inputs

+

Between 2 and 3 inputs.

+
    +
  • X (heterogeneous)T: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn)

  • +
  • W (heterogeneous)T: The weight tensor that will be used in the convolutions; has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x … x kn), where (k1 x k2 x … x kn) is the dimension of the kernel. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)

  • +
  • B (optional, heterogeneous)T: Optional 1D bias to be added to the convolution, has size of M.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, pad lengths and group count. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxConv_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConv_1(*args, **kwargs)#
+

Version

+

Onnx name: Conv

+

This version of the operator has been available since version 1.

+

Summary

+

The convolution operator consumes an input tensor and a filter, and computes the output.

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID, where the default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size matches the input. In case of an odd padding amount, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID means no padding. Default value is name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • group: number of groups input channels and output channels are divided into. Default value is +name: "group" i: 1 type: INT


Inputs

+

Between 2 and 3 inputs.

+
    +
  • X (heterogeneous)T: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
  • W (heterogeneous)T: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL.

  • +
  • B (optional, heterogeneous)T: Optional 1D bias to be added to the convolution, has size of M.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxConv_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxConv_11(*args, **kwargs)#
+

Version

+

Onnx name: Conv

+

This version of the operator has been available since version 11.

+

Summary

+

The convolution operator consumes an input tensor and a filter, and computes the output.

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • group: number of groups input channels and output channels are divided into. Default value is +name: "group" i: 1 type: INT


Inputs

+

Between 2 and 3 inputs.

+
    +
  • X (heterogeneous)T: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
  • W (heterogeneous)T: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. Assuming zero based indices for the shape array, X.shape[1] == (W.shape[1] * group) == C and W.shape[0] mod G == 0. Or in other words FILTER_IN_CHANNEL multiplied by the number of groups should be equal to DATA_CHANNEL and the number of feature maps M should be a multiple of the number of groups G.

  • +
  • B (optional, heterogeneous)T: Optional 1D bias to be added to the convolution, has size of M.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
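The shape relationships stated above for grouped convolutions (X.shape[1] == W.shape[1] * group == C, and W.shape[0] a multiple of the number of groups) can be checked mechanically; the helper below is a small illustrative sketch, not part of skl2onnx.

def check_grouped_conv_shapes(x_shape, w_shape, group):
    """Validate Conv input/weight shapes: x is (N, C, ...), w is (M, C/group, ...)."""
    c = x_shape[1]
    m, c_per_group = w_shape[0], w_shape[1]
    assert c_per_group * group == c, "FILTER_IN_CHANNEL * group must equal DATA_CHANNEL"
    assert m % group == 0, "number of feature maps M must be a multiple of group"
    return m  # number of output channels

# Example: depthwise-style convolution with 8 channels and group=8.
print(check_grouped_conv_shapes((1, 8, 32, 32), (8, 1, 3, 3), group=8))  # 8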
+ +
+
+
+
+

OnnxCos#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCos(*args, **kwargs)#
+

Version

+

Onnx name: Cos

+

This version of the operator has been available since version 7.

+

Summary

+

Calculates the cosine of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The cosine of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxCos_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCos_7(*args, **kwargs)#
+

Version

+

Onnx name: Cos

+

This version of the operator has been available since version 7.

+

Summary

+

Calculates the cosine of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The cosine of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxCosh#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCosh(*args, **kwargs)#
+

Version

+

Onnx name: Cosh

+

This version of the operator has been available since version 9.

+

Summary

+

Calculates the hyperbolic cosine of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic cosine values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxCosh_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCosh_9(*args, **kwargs)#
+

Version

+

Onnx name: Cosh

+

This version of the operator has been available since version 9.

+

Summary

+

Calculates the hyperbolic cosine of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic cosine values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxCumSum#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCumSum(*args, **kwargs)#
+

Version

+

Onnx name: CumSum

+

This version of the operator has been available since version 14.

+

Summary

+

Performs cumulative sum of the input elements along the given axis. By default, it will do the sum inclusively meaning the first element is copied as is. Through an exclusive attribute, this behavior can change to exclude the first element. It can also perform summation in the opposite direction of the axis. For that, set reverse attribute to 1.

+

Example:

+
input_x = [1, 2, 3]
axis=0
output = [1, 3, 6]
exclusive=1
output = [0, 1, 3]
exclusive=0
reverse=1
output = [6, 5, 3]
exclusive=1
reverse=1
output = [5, 3, 0]
+
+
+
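The example above can be reproduced with NumPy; this is only an illustrative sketch of the exclusive/reverse semantics, not the runtime implementation.

import numpy as np

def cumsum_like_onnx(x, axis=0, exclusive=0, reverse=0):
    x = np.asarray(x)
    if reverse:
        x = np.flip(x, axis=axis)
    y = np.cumsum(x, axis=axis)
    if exclusive:
        # Shift so that element j holds the sum of the first j-1 elements.
        y = y - x
    if reverse:
        y = np.flip(y, axis=axis)
    return y

x = [1, 2, 3]
print(cumsum_like_onnx(x))                          # [1 3 6]
print(cumsum_like_onnx(x, exclusive=1))             # [0 1 3]
print(cumsum_like_onnx(x, reverse=1))               # [6 5 3]
print(cumsum_like_onnx(x, exclusive=1, reverse=1))  # [5 3 0]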

Attributes

+
    +
  • exclusive: If set to 1 will return exclusive sum in which the top element is not included. In other terms, if set to 1, the j-th output element would be the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements. Default value is +name: "exclusive" i: 0 type: INT

  • +
  • reverse: If set to 1 will perform the sums in reverse direction. Default value is +name: "reverse" i: 0 type: INT

  • +
+

Inputs

+
    +
  • x (heterogeneous)T: An input tensor that is to be processed.

  • +
  • axis (heterogeneous)T2: A 0-D tensor. Must be in the range [-rank(x), rank(x)-1]. Negative value means counting dimensions from the back.

  • +
+

Outputs

+
    +
  • y (heterogeneous)T: Output tensor of the same type as ‘x’ with cumulative sums of the x’s elements

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • +
  • T2 tensor(int32), tensor(int64): axis tensor can be int32 or int64 only

  • +
+
+ +
+
+
+
+

OnnxCumSum_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCumSum_11(*args, **kwargs)#
+

Version

+

Onnx name: CumSum

+

This version of the operator has been available since version 11.

+

Summary

+

Performs cumulative sum of the input elements along the given axis. By default, it will do the sum inclusively meaning the first element is copied as is. Through an exclusive attribute, this behavior can change to exclude the first element. It can also perform summation in the opposite direction of the axis. For that, set reverse attribute to 1.

+

Example:

+
input_x = [1, 2, 3]
axis=0
output = [1, 3, 6]
exclusive=1
output = [0, 1, 3]
exclusive=0
reverse=1
output = [6, 5, 3]
exclusive=1
reverse=1
output = [5, 3, 0]
+
+
+

Attributes

+
    +
  • exclusive: If set to 1 will return exclusive sum in which the top element is not included. In other terms, if set to 1, the j-th output element would be the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements. Default value is +name: "exclusive" i: 0 type: INT

  • +
  • reverse: If set to 1 will perform the sums in reverse direction. Default value is +name: "reverse" i: 0 type: INT

  • +
+

Inputs

+
    +
  • x (heterogeneous)T: An input tensor that is to be processed.

  • +
  • axis (heterogeneous)T2: A 0-D tensor. Must be in the range [-rank(x), rank(x)-1]. Negative value means counting dimensions from the back.

  • +
+

Outputs

+
    +
  • y (heterogeneous)T: Output tensor of the same type as ‘x’ with cumulative sums of the x’s elements

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float), tensor(double): Input can be of any tensor type.

  • +
  • T2 tensor(int32), tensor(int64): axis tensor can be int32 or int64 only

  • +
+
+ +
+
+
+
+

OnnxCumSum_14#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxCumSum_14(*args, **kwargs)#
+

Version

+

Onnx name: CumSum

+

This version of the operator has been available since version 14.

+

Summary

+

Performs cumulative sum of the input elements along the given axis. By default, it will do the sum inclusively meaning the first element is copied as is. Through an exclusive attribute, this behavior can change to exclude the first element. It can also perform summation in the opposite direction of the axis. For that, set reverse attribute to 1.

+

Example:

+
input_x = [1, 2, 3]
axis=0
output = [1, 3, 6]
exclusive=1
output = [0, 1, 3]
exclusive=0
reverse=1
output = [6, 5, 3]
exclusive=1
reverse=1
output = [5, 3, 0]
+
+
+

Attributes

+
    +
  • exclusive: If set to 1 will return exclusive sum in which the top element is not included. In other terms, if set to 1, the j-th output element would be the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements. Default value is +name: "exclusive" i: 0 type: INT

  • +
  • reverse: If set to 1 will perform the sums in reverse direction. Default value is +name: "reverse" i: 0 type: INT

  • +
+

Inputs

+
    +
  • x (heterogeneous)T: An input tensor that is to be processed.

  • +
  • axis (heterogeneous)T2: A 0-D tensor. Must be in the range [-rank(x), rank(x)-1]. Negative value means counting dimensions from the back.

  • +
+

Outputs

+
    +
  • y (heterogeneous)T: Output tensor of the same type as ‘x’ with cumulative sums of the x’s elements

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • +
  • T2 tensor(int32), tensor(int64): axis tensor can be int32 or int64 only

  • +
+
+ +
+
+
+
+

OnnxDFT#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDFT(*args, **kwargs)#
+

Version

+

Onnx name: DFT

+

This version of the operator has been available since version 17.

+

Summary

+

Computes the discrete Fourier transform of input.

+

Attributes

+
    +
  • axis: The axis on which to perform the DFT. By default this value is set to 1, which corresponds to the first dimension after the batch index. Default value is +name: "axis" i: 1 type: INT

  • +
  • inverse: Whether to perform the inverse discrete fourier transform. By default this value is set to 0, which corresponds to false. Default value is +name: "inverse" i: 0 type: INT

  • +
  • onesided: If onesided is 1, only values for w in [0, 1, 2, …, floor(n_fft/2) + 1] are returned because the real-to-complex Fourier transform satisfies the conjugate symmetry, i.e., X[m, w] = X[m, n_fft-w]*. Note if the input or window tensors are complex, then onesided output is not possible. Enabling onesided with real inputs performs a Real-valued fast Fourier transform (RFFT). When invoked with real or complex valued input, the default value is 0. Values can be 0 or 1. Default value is name: "onesided" i: 0 type: INT

  • +
+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • input (heterogeneous)T1: For real input, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]…[signal_dimN][1]. For complex input, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]…[signal_dimN][2]. The first dimension is the batch dimension. The following N dimensions correspond to the signal’s dimensions. The final dimension represents the real and imaginary parts of the value in that order.

  • +
  • dft_length (optional, heterogeneous)T2: The length of the signal. If greater than the axis dimension, the signal will be zero-padded up to dft_length. If less than the axis dimension, only the first dft_length values will be used as the signal. It’s an optional value.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T1: The Fourier Transform of the input vector. If onesided is 0, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]…[signal_dimN][2]. If axis=1 and onesided is 1, the following shape is expected: [batch_idx][floor(signal_dim1/2)+1][signal_dim2]…[signal_dimN][2]. If axis=2 and onesided is 1, the following shape is expected: [batch_idx][signal_dim1][floor(signal_dim2/2)+1]…[signal_dimN][2]. If axis=N and onesided is 1, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]…[floor(signal_dimN/2)+1][2]. The signal_dim at the specified axis is equal to the dft_length.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
  • T2 tensor(int32), tensor(int64): Constrain scalar length types to int64_t.

  • +
+
+ +
+
+
+
+
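For real input with onesided=1, the output layout corresponds to a standard real FFT along the chosen axis; the NumPy sketch below illustrates that layout under this assumption (signal values and sizes are arbitrary), with the trailing dimension of size 2 holding the real and imaginary parts.

import numpy as np

# Hypothetical real signal: batch of 1, length-8 signal, trailing [1] dimension for "real input".
signal = np.random.randn(1, 8, 1).astype(np.float32)

# Reference: one-sided FFT along axis 1.
ref = np.fft.rfft(signal[..., 0], axis=1)           # shape (1, 8//2 + 1), complex
ref_as_pairs = np.stack([ref.real, ref.imag], -1)   # shape (1, 5, 2), like DFT's output layout

print(ref_as_pairs.shape)  # (1, 5, 2): floor(signal_dim1/2)+1 bins, real/imag last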

OnnxDFT_17#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDFT_17(*args, **kwargs)#
+

Version

+

Onnx name: DFT

+

This version of the operator has been available since version 17.

+

Summary

+

Computes the discrete Fourier transform of input.

+

Attributes

+
    +
  • axis: The axis on which to perform the DFT. By default this value is set to 1, which corresponds to the first dimension after the batch index. Default value is +name: "axis" i: 1 type: INT

  • +
  • inverse: Whether to perform the inverse discrete fourier transform. By default this value is set to 0, which corresponds to false. Default value is +name: "inverse" i: 0 type: INT

  • +
  • onesided: If onesided is 1, only values for w in [0, 1, 2, …, floor(n_fft/2) + 1] are returned because the real-to-complex Fourier transform satisfies the conjugate symmetry, i.e., X[m, w] = X[m, n_fft-w]*. Note if the input or window tensors are complex, then onesided output is not possible. Enabling onesided with real inputs performs a Real-valued fast Fourier transform (RFFT). When invoked with real or complex valued input, the default value is 0. Values can be 0 or 1. Default value is name: "onesided" i: 0 type: INT

  • +
+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • input (heterogeneous)T1: For real input, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]…[signal_dimN][1]. For complex input, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]…[signal_dimN][2]. The first dimension is the batch dimension. The following N dimensions correspond to the signal’s dimensions. The final dimension represents the real and imaginary parts of the value in that order.

  • +
  • dft_length (optional, heterogeneous)T2: The length of the signal. If greater than the axis dimension, the signal will be zero-padded up to dft_length. If less than the axis dimension, only the first dft_length values will be used as the signal. It’s an optional value.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T1: The Fourier Transform of the input vector. If onesided is 0, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]…[signal_dimN][2]. If axis=1 and onesided is 1, the following shape is expected: [batch_idx][floor(signal_dim1/2)+1][signal_dim2]…[signal_dimN][2]. If axis=2 and onesided is 1, the following shape is expected: [batch_idx][signal_dim1][floor(signal_dim2/2)+1]…[signal_dimN][2]. If axis=N and onesided is 1, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]…[floor(signal_dimN/2)+1][2]. The signal_dim at the specified axis is equal to the dft_length.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
  • T2 tensor(int32), tensor(int64): Constrain scalar length types to int64_t.

  • +
+
+ +
+
+
+
+

OnnxDepthToSpace#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDepthToSpace(*args, **kwargs)#
+

Version

+

Onnx name: DepthToSpace

+

This version of the operator has been available since version 13.

+

Summary

+

DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. By default, mode = DCR. In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the following order: depth, column, and then row. The output y is computed from the input x as below:

+
b, c, h, w = x.shape
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
+
+
+

In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the following order: column, row, and the depth. The output y is computed from the input x as below:

+
b, c, h, w = x.shape
tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])
tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])
y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])
+
+
+

Attributes

+
    +
  • +
  • mode: DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order. Default value is +name: "mode" s: "DCR" type: STRING

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize].

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxDepthToSpace_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDepthToSpace_1(*args, **kwargs)#
+

Version

+

Onnx name: DepthToSpace

+

This version of the operator has been available since version 1.

+

Summary

+

DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize].

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxDepthToSpace_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDepthToSpace_11(*args, **kwargs)#
+

Version

+

Onnx name: DepthToSpace

+

This version of the operator has been available since version 11.

+

Summary

+

DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. By default, mode = DCR. In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the following order: depth, column, and then row. The output y is computed from the input x as below:

+

b, c, h, w = x.shape
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])

In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the following order: column, row, and the depth. The output y is computed from the input x as below:

b, c, h, w = x.shape
tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])
tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])
y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])

+

Attributes

+
    +
  • +
  • mode: DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order. Default value is +name: "mode" s: "DCR" type: STRING

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize].

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxDepthToSpace_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDepthToSpace_13(*args, **kwargs)#
+

Version

+

Onnx name: DepthToSpace

+

This version of the operator has been available since version 13.

+

Summary

+

DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. By default, mode = DCR. In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the following order: depth, column, and then row. The output y is computed from the input x as below:

+
b, c, h, w = x.shape
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
+
+
+

In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the following order: column, row, and the depth. The output y is computed from the input x as below:

+
b, c, h, w = x.shape
tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])
tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])
y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])
+
+
+

Attributes

+
    +
  • +
  • mode: DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order. Default value is +name: "mode" s: "DCR" type: STRING

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize].

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxDequantizeLinear#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDequantizeLinear(*args, **kwargs)#
+

Version

+

Onnx name: DequantizeLinear

+

This version of the operator has been available since version 13.

+

Summary

+

The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor. The dequantization formula is y = (x - x_zero_point) * x_scale. x_scale and x_zero_point must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. x_zero_point and x must have same type. x and y must have same shape. In the case of dequantizing int32, there’s no zero point (zero point is supposed to be 0).

+
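A minimal NumPy sketch of the dequantization formula above, covering both the per-tensor (scalar) and per-axis (1-D) cases; it is illustrative only, and the helper name and example values are not part of the operator definition.

import numpy as np

def dequantize_linear(x, x_scale, x_zero_point=None, axis=1):
    x = x.astype(np.float32)
    zp = np.zeros_like(np.asarray(x_scale)) if x_zero_point is None else x_zero_point
    x_scale = np.asarray(x_scale, dtype=np.float32)
    zp = np.asarray(zp, dtype=np.float32)
    if x_scale.ndim == 1:  # per-axis: broadcast scale/zero point along `axis`
        shape = [1] * x.ndim
        shape[axis] = -1
        x_scale = x_scale.reshape(shape)
        zp = zp.reshape(shape)
    return (x - zp) * x_scale

x = np.array([[0, 128, 255]], dtype=np.uint8)
print(dequantize_linear(x, np.float32(0.1), np.uint8(128)))  # [[-12.8   0.   12.7]]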

Attributes

+
    +
  • axis: (Optional) The axis of the dequantizing dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is +name: "axis" i: 1 type: INT

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • x (heterogeneous)T: N-D quantized input tensor to be de-quantized.

  • +
  • x_scale (heterogeneous)tensor(float): Scale for input ‘x’. It can be a scalar, which means a per-tensor/layer dequantization, or a 1-D tensor for per-axis dequantization.

  • +
  • x_zero_point (optional, heterogeneous)T: Zero point for input ‘x’. Shape must match x_scale. It’s optional. Zero point is 0 when it’s not specified.

  • +
+

Outputs

+
    +
  • y (heterogeneous)tensor(float): N-D full precision output tensor. It has same shape as input ‘x’.

  • +
+

Type Constraints

+
    +
  • T tensor(int8), tensor(uint8), tensor(int32): Constrain ‘x_zero_point’ and ‘x’ to 8-bit/32-bit integer tensor.

  • +
+
+ +
+
+
+
+

OnnxDequantizeLinear_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDequantizeLinear_10(*args, **kwargs)#
+

Version

+

Onnx name: DequantizeLinear

+

This version of the operator has been available since version 10.

+

Summary

+

The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor. The dequantization formula is y = (x - x_zero_point) * x_scale. ‘x_scale’ and ‘x_zero_point’ are both scalars. ‘x_zero_point’ and ‘x’ must have the same type. ‘x’ and ‘y’ must have the same shape. In the case of dequantizing int32, there’s no zero point (zero point is supposed to be 0).

+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • x (heterogeneous)T: N-D quantized input tensor to be de-quantized.

  • +
  • x_scale (heterogeneous)tensor(float): Scale for input ‘x’. It’s a scalar, which means a per-tensor/layer quantization.

  • +
  • x_zero_point (optional, heterogeneous)T: Zero point for input ‘x’. It’s a scalar, which means a per-tensor/layer quantization. It’s optional. 0 is the default value when it’s not specified.

  • +
+

Outputs

+
    +
  • y (heterogeneous)tensor(float): N-D full precision output tensor. It has same shape as input ‘x’.

  • +
+

Type Constraints

+
    +
  • T tensor(int8), tensor(uint8), tensor(int32): Constrain ‘x_zero_point’ and ‘x’ to 8-bit/32-bit integer tensor.

  • +
+
+ +
+
+
+
+

OnnxDequantizeLinear_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDequantizeLinear_13(*args, **kwargs)#
+

Version

+

Onnx name: DequantizeLinear

+

This version of the operator has been available since version 13.

+

Summary

+

The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor. The dequantization formula is y = (x - x_zero_point) * x_scale. x_scale and x_zero_point must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. x_zero_point and x must have same type. x and y must have same shape. In the case of dequantizing int32, there’s no zero point (zero point is supposed to be 0).

+

Attributes

+
    +
  • axis: (Optional) The axis of the dequantizing dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is +name: "axis" i: 1 type: INT

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • x (heterogeneous)T: N-D quantized input tensor to be de-quantized.

  • +
  • x_scale (heterogeneous)tensor(float): Scale for input ‘x’. It can be a scalar, which means a per-tensor/layer dequantization, or a 1-D tensor for per-axis dequantization.

  • +
  • x_zero_point (optional, heterogeneous)T: Zero point for input ‘x’. Shape must match x_scale. It’s optional. Zero point is 0 when it’s not specified.

  • +
+

Outputs

+
    +
  • y (heterogeneous)tensor(float): N-D full precision output tensor. It has same shape as input ‘x’.

  • +
+

Type Constraints

+
    +
  • T tensor(int8), tensor(uint8), tensor(int32): Constrain ‘x_zero_point’ and ‘x’ to 8-bit/32-bit integer tensor.

  • +
+
+ +
+
+
+
+

OnnxDet#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDet(*args, **kwargs)#
+

Version

+

Onnx name: Det

+

This version of the operator has been available since version 11.

+

Summary

+

Det calculates the determinant of a square matrix or batches of square matrices. Det takes one input tensor of shape [*, M, M], where * is zero or more batch dimensions, and the inner-most 2 dimensions form square matrices. The output is a tensor of shape [*], containing the determinants of all input submatrices, e.g., when the input is 2-D, the output is a scalar (shape is empty: []).

+
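The shape behavior described above mirrors numpy.linalg.det; a short illustrative sketch (example shapes are arbitrary):

import numpy as np

single = np.array([[1.0, 2.0], [3.0, 4.0]])   # shape (2, 2)
batch = np.random.randn(5, 3, 3)              # shape (5, 3, 3): 5 square matrices

print(np.linalg.det(single).shape)  # () -- a scalar for 2-D input
print(np.linalg.det(batch).shape)   # (5,) -- one determinant per batch entry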

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to floating-point tensors.

  • +
+
+ +
+
+
+
+

OnnxDet_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDet_11(*args, **kwargs)#
+

Version

+

Onnx name: Det

+

This version of the operator has been available since version 11.

+

Summary

+

Det calculates the determinant of a square matrix or batches of square matrices. Det takes one input tensor of shape [*, M, M], where * is zero or more batch dimensions, and the inner-most 2 dimensions form square matrices. The output is a tensor of shape [*], containing the determinants of all input submatrices, e.g., when the input is 2-D, the output is a scalar (shape is empty: []).

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to floating-point tensors.

  • +
+
+ +
+
+
+
+

OnnxDictVectorizer#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDictVectorizer(*args, **kwargs)#
+

Version

+

Onnx name: DictVectorizer

+

This version of the operator has been available since version 1 of domain ai.onnx.ml.

+

Summary

+

Uses an index mapping to convert a dictionary to an array.

+

Given a dictionary, each key is looked up in the vocabulary attribute corresponding to the key type. The index into the vocabulary array at which the key is found is then used to index the output 1-D tensor ‘Y’ and insert into it the value found in the dictionary ‘X’.

+

The key type of the input map must correspond to the element type of the defined vocabulary attribute. Therefore, the output array will be equal in length to the index mapping vector parameter. All keys in the input dictionary must be present in the index mapping vector. For each item in the input dictionary, insert its value in the output array. Any keys not present in the input dictionary will be zero in the output array.

+

For example: if the string_vocabulary parameter is set to ["a", "c", "b", "z"], then an input of {"a": 4, "c": 8} will produce an output of [4, 8, 0, 0].

+
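A plain-Python sketch of the mapping in the example above (the helper name and vocabulary are hypothetical, illustrative only):

def dict_vectorize(x, vocabulary):
    """Map a dict onto a dense 1-D list following the vocabulary order."""
    return [x.get(key, 0) for key in vocabulary]

print(dict_vectorize({"a": 4, "c": 8}, ["a", "c", "b", "z"]))  # [4, 8, 0, 0]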

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: A dictionary.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: A 1-D tensor holding values from the input dictionary.

  • +
+

Type Constraints

+
    +
  • T1 map(string, int64), map(int64, string), map(int64, float), map(int64, double), map(string, float), map(string, double): The input must be a map from strings or integers to either strings or a numeric type. The key and value types cannot be the same.

  • +
  • T2 tensor(int64), tensor(float), tensor(double), tensor(string): The output will be a tensor of the value type of the input map. Its shape will be [1,C], where C is the length of the input dictionary.

  • +
+
+ +
+
+
+
+

OnnxDictVectorizer_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDictVectorizer_1(*args, **kwargs)#
+

Version

+

Onnx name: DictVectorizer

+

This version of the operator has been available since version 1 of domain ai.onnx.ml.

+

Summary

+

Uses an index mapping to convert a dictionary to an array.

+

Given a dictionary, each key is looked up in the vocabulary attribute corresponding to the key type. The index into the vocabulary array at which the key is found is then used to index the output 1-D tensor ‘Y’ and insert into it the value found in the dictionary ‘X’.

+

The key type of the input map must correspond to the element type of the defined vocabulary attribute. Therefore, the output array will be equal in length to the index mapping vector parameter. All keys in the input dictionary must be present in the index mapping vector. For each item in the input dictionary, insert its value in the output array. Any keys not present in the input dictionary will be zero in the output array.

+

For example: if the string_vocabulary parameter is set to ["a", "c", "b", "z"], then an input of {"a": 4, "c": 8} will produce an output of [4, 8, 0, 0].

+

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: A dictionary.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: A 1-D tensor holding values from the input dictionary.

  • +
+

Type Constraints

+
    +
  • T1 map(string, int64), map(int64, string), map(int64, float), map(int64, double), map(string, float), map(string, double): The input must be a map from strings or integers to either strings or a numeric type. The key and value types cannot be the same.

  • +
  • T2 tensor(int64), tensor(float), tensor(double), tensor(string): The output will be a tensor of the value type of the input map. Its shape will be [1,C], where C is the length of the input dictionary.

  • +
+
+ +
+
+
+
+

OnnxDiv#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDiv(*args, **kwargs)#
+

Version

+

Onnx name: Div

+

This version of the operator has been available since version 14.

+

Summary

+

Performs element-wise binary division (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
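As with the other classes on this page, the operator can be composed through the algebra API and executed with onnxruntime. The snippet below is a hedged sketch of that typical pattern; the array values and the chosen op_version are arbitrary illustrative choices, not requirements of the operator.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxDiv
from onnxruntime import InferenceSession

a = np.array([[4.0, 9.0]], dtype=np.float32)
b = np.array([[2.0, 3.0]], dtype=np.float32)

# Build a one-node graph C = A / B and serialize it to an ONNX model.
node = OnnxDiv('A', 'B', op_version=14, output_names=['C'])
onx = node.to_onnx({'A': a, 'B': b})

sess = InferenceSession(onx.SerializeToString())
print(sess.run(None, {'A': a, 'B': b}))  # [array([[2., 3.]], dtype=float32)]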
+ +
+
+
+
+

OnnxDiv_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDiv_1(*args, **kwargs)#
+

Version

+

Onnx name: Div

+

This version of the operator has been available since version 1.

+

Summary

+

Performs element-wise binary division (with limited broadcast support).

+

If necessary the right-hand-side argument will be broadcasted to match the shape of the left-hand-side argument. When broadcasting is specified, the second tensor can either be of element size 1 (including a scalar tensor and any tensor with rank equal to or smaller than the first tensor), or having its shape as a contiguous subset of the first tensor’s shape. The starting of the mutually equal shape is specified by the argument “axis”, and if it is not set, suffix matching is assumed. 1-dim expansion doesn’t work yet.

+

For example, the following tensor shapes are supported (with broadcast=1):

+
+

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor
shape(A) = (2, 3, 4, 5), shape(B) = (5,)
shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

+
+

Attribute broadcast=1 needs to be passed to enable broadcasting.

+

Attributes

+
    +
  • +
  • broadcast: Pass 1 to enable broadcasting Default value is +name: "broadcast" i: 0 type: INT

  • +
  • +
+

Inputs

+
    +
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • +
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxDiv_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDiv_13(*args, **kwargs)#
+

Version

+

Onnx name: Div

+

This version of the operator has been available since version 13.

+

Summary

+

Performs element-wise binary division (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxDiv_14#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDiv_14(*args, **kwargs)#
+

Version

+

Onnx name: Div

+

This version of the operator has been available since version 14.

+

Summary

+

Performs element-wise binary division (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxDiv_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDiv_6(*args, **kwargs)#
+

Version

+

Onnx name: Div

+

This version of the operator has been available since version 6.

+

Summary

+

Performs element-wise binary division (with limited broadcast support).

+

If necessary the right-hand-side argument will be broadcasted to match the shape of the left-hand-side argument. When broadcasting is specified, the second tensor can either be of element size 1 (including a scalar tensor and any tensor with rank equal to or smaller than the first tensor), or having its shape as a contiguous subset of the first tensor’s shape. The starting of the mutually equal shape is specified by the argument “axis”, and if it is not set, suffix matching is assumed. 1-dim expansion doesn’t work yet.

+

For example, the following tensor shapes are supported (with broadcast=1):

+
+

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor
shape(A) = (2, 3, 4, 5), shape(B) = (5,)
shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

+
+

Attribute broadcast=1 needs to be passed to enable broadcasting.

+

Attributes

+
    +
  • +
  • broadcast: Pass 1 to enable broadcasting Default value is +name: "broadcast" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • +
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxDiv_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDiv_7(*args, **kwargs)#
+

Version

+

Onnx name: Div

+

This version of the operator has been available since version 7.

+

Summary

+

Performs element-wise binary division (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxDropout#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDropout(*args, **kwargs)#
+

Version

+

Onnx name: Dropout

+

This version of the operator has been available since version 13.

+

Summary

+

Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). It produces two tensor outputs, output (floating-point tensor) and mask (optional Tensor<bool>). If training_mode is true then the output Y will be a random dropout; Note that this Dropout scales the masked input data by the following equation, so to convert the trained model into inference mode, the user can simply not pass training_mode input or set it to false.

+
output = scale * data * mask,
+
+
+

where

+
scale = 1. / (1. - ratio).
+
+
+

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+
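The scaling rule above can be written out directly; the following is a minimal NumPy sketch of training-mode dropout (illustrative only, not the runtime kernel; the helper name and example values are arbitrary).

import numpy as np

def dropout_train(data, ratio=0.5, seed=0):
    rng = np.random.default_rng(seed)
    mask = rng.random(data.shape) >= ratio   # keep each element with probability 1 - ratio
    scale = 1.0 / (1.0 - ratio)              # rescale so the expected value is unchanged
    return scale * data * mask, mask

y, mask = dropout_train(np.ones((2, 4), dtype=np.float32), ratio=0.5)
print(y)     # kept entries equal 2.0, dropped entries equal 0.0
print(mask)  # boolean mask returned alongside the output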

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 3 inputs.

+
    +
  • data (heterogeneous)T: The input data as Tensor.

  • +
  • ratio (optional, heterogeneous)T1: The ratio of random dropout, with value in [0, 1). If this input was not set, or if it was set to 0, the output would be a simple copy of the input. If it’s non-zero, output will be a random dropout of the scaled input, which is typically the case during training. It is an optional value, if not specified it will default to 0.5.

  • +
  • training_mode (optional, heterogeneous)T2: If set to true then it indicates dropout is being used for training. It is an optional value hence unless specified explicitly, it is false. If it is false, ratio is ignored and the operation mimics inference mode where nothing will be dropped from the input data and if mask is requested as output it will contain all ones.

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • output (heterogeneous)T: The output.

  • +
  • mask (optional, heterogeneous)T2: The output mask.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input ‘ratio’ types to float tensors.

  • +
  • T2 tensor(bool): Constrain output ‘mask’ types to boolean tensors.

  • +
+
+ +
+
+
+
+

OnnxDropout_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDropout_1(*args, **kwargs)#
+

Version

+

Onnx name: Dropout

+

This version of the operator has been available since version 1.

+

Summary

+

Dropout takes one input data (Tensor<float>) and produces two Tensor outputs, output (Tensor<float>) and mask (Tensor<bool>). Depending on whether it is in test mode or not, the output Y will either be a random dropout, or a simple copy of the input. Note that our implementation of Dropout does scaling in the training phase, so during testing nothing needs to be done.

+

Attributes

+
    +
  • +
  • is_test: (int, default 0) if nonzero, run dropout in test mode where the output is simply Y = X. Default value is +name: "is_test" i: 0 type: INT

  • +
  • ratio: (float, default 0.5) the ratio of random dropout Default value is +name: "ratio" f: 0.5 type: FLOAT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: The input data as Tensor.

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • output (heterogeneous)T: The output.

  • +
  • mask (optional, heterogeneous)T: The output mask. If is_test is nonzero, this output is not filled.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxDropout_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDropout_10(*args, **kwargs)#
+

Version

+

Onnx name: Dropout

+

This version of the operator has been available since +version 10.

+

Summary

+

Dropout takes one input floating tensor and produces two tensor outputs, +output (floating tensor) and mask (Tensor<bool>). Depending on whether it is +in test mode or not, the output Y will either be a random dropout, or a simple +copy of the input. Note that our implementation of Dropout does scaling in +the training phase, so during testing nothing needs to be done. +This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

+
    +
  • ratio: The ratio of random dropout Default value is +name: "ratio" f: 0.5 type: FLOAT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: The input data as Tensor.

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • output (heterogeneous)T: The output.

  • +
  • mask (optional, heterogeneous)T1: The output mask.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T1 tensor(bool): Constrain output mask types to boolean tensors.

  • +
+
+ +
+
+
+
+

OnnxDropout_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDropout_12(*args, **kwargs)#
+

Version

+

Onnx name: Dropout

+

This version of the operator has been available since +version 12.

+

Summary

+

Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). It produces two tensor outputs, +output (floating-point tensor) and mask (optional Tensor<bool>). If training_mode is true then the output Y will be a random dropout; +Note that this Dropout scales the masked input data by the following equation, so to convert the trained model into inference mode, +the user can simply not pass training_mode input or set it to false.

+
output = scale * data * mask,
+
+
+

where

+
scale = 1. / (1. - ratio).
+
+
+

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 3 inputs.

+
    +
  • data (heterogeneous)T: The input data as Tensor.

  • +
  • ratio (optional, heterogeneous)T1: The ratio of random dropout, with value in [0, 1). If this input was not set, or if it was set to 0, the output would be a simple copy of the input. If it’s non-zero, output will be a random dropout of the scaled input, which is typically the case during training. It is an optional value, if not specified it will default to 0.5.

  • +
  • training_mode (optional, heterogeneous)T2: If set to true then it indicates dropout is being used for training. It is an optional value hence unless specified explicitly, it is false. If it is false, ratio is ignored and the operation mimics inference mode where nothing will be dropped from the input data and if mask is requested as output it will contain all ones.

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • output (heterogeneous)T: The output.

  • +
  • mask (optional, heterogeneous)T2: The output mask.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input ‘ratio’ types to float tensors.

  • +
  • T2 tensor(bool): Constrain output ‘mask’ types to boolean tensors.

  • +
+
+ +
+
+
+
+

OnnxDropout_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDropout_13(*args, **kwargs)#
+

Version

+

Onnx name: Dropout

+

This version of the operator has been available since +version 13.

+

Summary

+

Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). It produces two tensor outputs, +output (floating-point tensor) and mask (optional Tensor<bool>). If training_mode is true then the output Y will be a random dropout; +Note that this Dropout scales the masked input data by the following equation, so to convert the trained model into inference mode, +the user can simply not pass training_mode input or set it to false.

+
output = scale * data * mask,
+
+
+

where

+
scale = 1. / (1. - ratio).
+
+
+

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 3 inputs.

+
    +
  • data (heterogeneous)T: The input data as Tensor.

  • +
  • ratio (optional, heterogeneous)T1: The ratio of random dropout, with value in [0, 1). If this input was not set, or if it was set to 0, the output would be a simple copy of the input. If it’s non-zero, output will be a random dropout of the scaled input, which is typically the case during training. It is an optional value, if not specified it will default to 0.5.

  • +
  • training_mode (optional, heterogeneous)T2: If set to true then it indicates dropout is being used for training. It is an optional value hence unless specified explicitly, it is false. If it is false, ratio is ignored and the operation mimics inference mode where nothing will be dropped from the input data and if mask is requested as output it will contain all ones.

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • output (heterogeneous)T: The output.

  • +
  • mask (optional, heterogeneous)T2: The output mask.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input ‘ratio’ types to float tensors.

  • +
  • T2 tensor(bool): Constrain output ‘mask’ types to boolean tensors.

  • +
+
+ +
+
+
+
+

OnnxDropout_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDropout_6(*args, **kwargs)#
+

Version

+

Onnx name: Dropout

+

This version of the operator has been available since +version 6.

+

Summary

+

Dropout takes one input data (Tensor<float>) and produces two Tensor outputs, +output (Tensor<float>) and mask (Tensor<bool>). Depending on whether it is in +test mode or not, the output Y will either be a random dropout, or a simple +copy of the input. Note that our implementation of Dropout does scaling in +the training phase, so during testing nothing needs to be done.

+

Attributes

+
    +
  • is_test: (int, default 0) if nonzero, run dropout in test mode where the output is simply Y = X. Default value is +name: "is_test" i: 0 type: INT

  • +
  • ratio: (float, default 0.5) the ratio of random dropout Default value is +name: "ratio" f: 0.5 type: FLOAT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: The input data as Tensor.

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • output (heterogeneous)T: The output.

  • +
  • mask (optional, heterogeneous)T: The output mask. If is_test is nonzero, this output is not filled.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxDropout_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDropout_7(*args, **kwargs)#
+

Version

+

Onnx name: Dropout

+

This version of the operator has been available since +version 7.

+

Summary

+

Dropout takes one input data (Tensor<float>) and produces two Tensor outputs, +output (Tensor<float>) and mask (Tensor<bool>). Depending on whether it is in +test mode or not, the output Y will either be a random dropout, or a simple +copy of the input. Note that our implementation of Dropout does scaling in +the training phase, so during testing nothing needs to be done. +This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

+
    +
  • ratio: The ratio of random dropout Default value is +name: "ratio" f: 0.5 type: FLOAT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: The input data as Tensor.

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • output (heterogeneous)T: The output.

  • +
  • mask (optional, heterogeneous)T: The output mask.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxDynamicQuantizeLinear#

class skl2onnx.algebra.onnx_ops.OnnxDynamicQuantizeLinear(*args, **kwargs)#

Version

Onnx name: DynamicQuantizeLinear

This version of the operator has been available since version 11.

Summary

A Function to fuse the calculation of Scale, Zero Point and the FP32->8Bit conversion of FP32 input data. Outputs Scale, ZeroPoint and Quantized Input for a given FP32 Input. Scale is calculated as:

y_scale = (max(x) - min(x)) / (qmax - qmin)

  • where qmax and qmin are the max and min values of the quantization range, i.e. [0, 255] in the case of uint8

  • the data range is adjusted to include 0.

Zero point is calculated as:

intermediate_zero_point = qmin - min(x)/y_scale
y_zero_point = cast(round(saturate(intermediate_zero_point)))

  • where qmax and qmin are the max and min values of the quantization range, i.e. [0, 255] in the case of uint8

  • for saturation, it saturates to [0, 255] if it’s uint8, or [-127, 127] if it’s int8. Right now only uint8 is supported.

  • rounding to nearest ties to even.

Data quantization formula is:

y = saturate(round(x / y_scale) + y_zero_point)

  • for saturation, it saturates to [0, 255] if it’s uint8, or [-127, 127] if it’s int8. Right now only uint8 is supported.

  • rounding to nearest ties to even.

Inputs

  • x (heterogeneous)T1: Input tensor

Outputs

  • y (heterogeneous)T2: Quantized output tensor

  • y_scale (heterogeneous)tensor(float): Output scale. It’s a scalar, which means a per-tensor/layer quantization.

  • y_zero_point (heterogeneous)T2: Output zero point. It’s a scalar, which means a per-tensor/layer quantization.

Type Constraints

  • T1 tensor(float): Constrain ‘x’ to float tensor.

  • T2 tensor(uint8), tensor(uint8): Constrain ‘y_zero_point’ and ‘y’ to 8-bit unsigned integer tensor.
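The three formulas combine into a short NumPy sketch for the uint8 case. This is illustrative only; the function name dynamic_quantize_linear_ref is made up, and np.round already rounds to nearest with ties to even.

    import numpy as np

    def dynamic_quantize_linear_ref(x):
        qmin, qmax = 0.0, 255.0                         # uint8 quantization range
        # Adjust the data range so that it always includes 0.
        x_min, x_max = min(x.min(), 0.0), max(x.max(), 0.0)
        y_scale = (x_max - x_min) / (qmax - qmin)
        # intermediate_zero_point = qmin - min(x)/y_scale, then round/saturate/cast
        zp = np.clip(np.round(qmin - x_min / y_scale), qmin, qmax).astype(np.uint8)
        # y = saturate(round(x / y_scale) + y_zero_point)
        y = np.clip(np.round(x / y_scale) + zp, qmin, qmax).astype(np.uint8)
        return y, np.float32(y_scale), zp

    y, scale, zero_point = dynamic_quantize_linear_ref(
        np.array([-1.0, 0.0, 2.5], dtype=np.float32))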

OnnxDynamicQuantizeLinear_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxDynamicQuantizeLinear_11(*args, **kwargs)#
+

Version

+

Onnx name: DynamicQuantizeLinear

+

This version of the operator has been available since +version 11.

+

Summary

+

A Function to fuse the calculation of Scale, Zero Point and the FP32->8Bit conversion of FP32 input data. Outputs Scale, ZeroPoint and Quantized Input for a given FP32 Input. Scale is calculated as:

+
y_scale = (max(x) - min(x))/(qmax - qmin)
+
+
+
    +
  • where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8

  • +
  • data range is adjusted to include 0.

  • +
+

Zero point is calculated as:

+
intermediate_zero_point = qmin - min(x)/y_scale
y_zero_point = cast(round(saturate(intermediate_zero_point)))
+
+
+
    +
  • where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8

  • +
  • for saturation, it saturates to [0, 255] if it’s uint8, or [-127, 127] if it’s int8. Right now only uint8 is supported.

  • +
  • rounding to nearest ties to even.

  • +
+

Data quantization formula is:

+
y = saturate (round (x / y_scale) + y_zero_point)
+
+
+
    +
  • for saturation, it saturates to [0, 255] if it’s uint8, or [-127, 127] if it’s int8. Right now only uint8 is supported.

  • +
  • rounding to nearest ties to even.

  • +
+

Inputs

+
    +
  • x (heterogeneous)T1: Input tensor

  • +
+

Outputs

+
    +
  • y (heterogeneous)T2: Quantized output tensor

  • +
  • y_scale (heterogeneous)tensor(float): Output scale. It’s a scalar, which means a per-tensor/layer quantization.

  • +
  • y_zero_point (heterogeneous)T2: Output zero point. It’s a scalar, which means a per-tensor/layer quantization.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float): Constrain ‘x’ to float tensor.

  • +
  • T2 tensor(uint8): Constrain ‘y_zero_point’ and ‘y’ to 8-bit unsigned integer tensor.

  • +
+
+ +
+
+
+
+

OnnxEinsum#

class skl2onnx.algebra.onnx_ops.OnnxEinsum(*args, **kwargs)#

Version

Onnx name: Einsum

This version of the operator has been available since version 12.

Summary

An einsum of the form term1, term2 -> output-term produces an output tensor using the following equation

output[output-term] = reduce-sum( input1[term1] * input2[term2] )

where the reduce-sum performs a summation over all the indices occurring in the input terms (term1, term2) that do not occur in the output-term.

The Einsum operator evaluates algebraic tensor operations on a sequence of tensors, using the Einstein summation convention. The equation string contains a comma-separated sequence of lower case letters. Each term corresponds to an operand tensor, and the characters within the terms correspond to operand dimensions.

This sequence may be followed by “->” to separate the left and right hand sides of the equation. If the equation contains “->” followed by the right-hand side, the explicit (non-classical) form of the Einstein summation is performed, and the right-hand side indices indicate output tensor dimensions. In other cases, output indices are (implicitly) set to the alphabetically sorted sequence of indices appearing exactly once in the equation.

When a dimension character is repeated in the left-hand side, it represents summation along the dimension.

The equation may contain an ellipsis (“…”) to enable broadcasting. An ellipsis must indicate a fixed number of dimensions. Specifically, every occurrence of ellipsis in the equation must represent the same number of dimensions. The right-hand side may contain exactly one ellipsis. In implicit mode, the ellipsis dimensions are set to the beginning of the output. The equation string may contain the space (U+0020) character.

Attributes

Inputs

Between 1 and 2147483647 inputs.

  • Inputs (variadic, heterogeneous)T: Operands

Outputs

  • Output (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numerical tensor types.
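The same explicit/implicit semantics are exposed by numpy.einsum, which makes a quick cross-check easy. A sketch only; the ONNX operator receives the equation string through its attribute (elided in the listing above).

    import numpy as np

    a = np.random.rand(2, 3).astype(np.float32)
    b = np.random.rand(3, 4).astype(np.float32)

    # Explicit form: indices after "->" name the output dimensions.
    matmul = np.einsum("ij,jk->ik", a, b)

    # Implicit form: the output indices are the alphabetically sorted indices
    # appearing exactly once, so "ij,jk" also yields the (i, k) product.
    implicit = np.einsum("ij,jk", a, b)

    assert np.allclose(matmul, implicit)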

OnnxEinsum_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxEinsum_12(*args, **kwargs)#
+

Version

+

Onnx name: Einsum

+

This version of the operator has been available since +version 12.

+

Summary

+

An einsum of the form term1, term2 -> output-term produces an output tensor using the following equation

+
output[output-term] = reduce-sum( input1[term1] * input2[term2] )
+
+
+

where the reduce-sum performs a summation over all the indices occurring in the input terms (term1, term2) +that do not occur in the output-term.

+

The Einsum operator evaluates algebraic tensor operations on a sequence of tensors, using the Einstein summation +convention. The equation string contains a comma-separated sequence of lower case letters. Each term corresponds to +an operand tensor, and the characters within the terms correspond to operands dimensions.

+

This sequence may be followed by “->” to separate the left and right hand side of the equation. +If the equation contains “->” followed by the right-hand side, the explicit (not classical) form of the Einstein +summation is performed, and the right-hand side indices indicate output tensor dimensions. In other cases, +output indices are (implicitly) set to the alphabetically sorted sequence of indices appearing exactly once in the +equation.

+

When a dimension character is repeated in the left-hand side, it represents summation along the dimension.

+

The equation may contain ellipsis (”…”) to enable broadcasting. Ellipsis must indicate a fixed number of dimensions. +Specifically, every occurrence of ellipsis in the equation must represent the same number of dimensions. +The right-hand side may contain exactly one ellipsis. In implicit mode, the ellipsis dimensions are set to the +beginning of the output. The equation string may contain space (U+0020) character.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • Inputs (variadic, heterogeneous)T: Operands

  • +
+

Outputs

+
    +
  • Output (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numerical tensor types.

  • +
+
+ +
+
+
+
+

OnnxElu#

class skl2onnx.algebra.onnx_ops.OnnxElu(*args, **kwargs)#

Version

Onnx name: Elu

This version of the operator has been available since version 6.

Summary

Elu takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the function f(x) = alpha * (exp(x) - 1.) for x < 0, f(x) = x for x >= 0., is applied to the tensor elementwise.

Attributes

  • alpha: Coefficient of ELU. Default value is name: "alpha" f: 1.0 type: FLOAT

Inputs

  • X (heterogeneous)T: 1D input tensor

Outputs

  • Y (heterogeneous)T: 1D output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
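The piecewise definition maps directly to a one-line reference (illustrative sketch only):

    import numpy as np

    def elu_reference(x, alpha=1.0):
        # f(x) = alpha * (exp(x) - 1) for x < 0, f(x) = x for x >= 0
        return np.where(x < 0, alpha * (np.exp(x) - 1.0), x)

    print(elu_reference(np.array([-2.0, -0.5, 0.0, 3.0], dtype=np.float32)))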

OnnxElu_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxElu_1(*args, **kwargs)#
+

Version

+

Onnx name: Elu

+

This version of the operator has been available since +version 1.

+

Summary

+

Elu takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the function f(x) = alpha * (exp(x) - 1.) for x < +0, f(x) = x for x >= 0., is applied to the tensor elementwise.

+

Attributes

+
    +
  • alpha: Coefficient of ELU default to 1.0. Default value is +name: "alpha" f: 1.0 type: FLOAT

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: 1D input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: 1D output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxElu_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxElu_6(*args, **kwargs)#
+

Version

+

Onnx name: Elu

+

This version of the operator has been available since +version 6.

+

Summary

+

Elu takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the function f(x) = alpha * (exp(x) - 1.) for x < +0, f(x) = x for x >= 0., is applied to the tensor elementwise.

+

Attributes

+
    +
  • alpha: Coefficient of ELU. Default value is +name: "alpha" f: 1.0 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: 1D input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: 1D output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxEqual#

class skl2onnx.algebra.onnx_ops.OnnxEqual(*args, **kwargs)#

Version

Onnx name: Equal

This version of the operator has been available since version 19.

Summary

Returns the tensor resulting from performing the equal logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous)T: First input operand for the logical operator.

  • B (heterogeneous)T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous)T1: Result tensor.

Type Constraints

  • T tensor(bool), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(string): Constrain input types to all (non-complex) tensors.

  • T1 tensor(bool): Constrain output to boolean tensor.

OnnxEqual_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxEqual_1(*args, **kwargs)#
+

Version

+

Onnx name: Equal

+

This version of the operator has been available since +version 1.

+

Summary

+

Returns the tensor resulted from performing the equal logical operation +elementwise on the input tensors A and B.

+

If broadcasting is enabled, the right-hand-side argument will be broadcasted +to match the shape of left-hand-side argument. See the doc of Add for a +detailed description of the broadcasting rules.

+

Attributes

+
    +
  • +
  • broadcast: Enable broadcasting Default value is +name: "broadcast" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: Left input tensor for the logical operator.

  • +
  • B (heterogeneous)T: Right input tensor for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool), tensor(int32), tensor(int64): Constrain input to integral tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxEqual_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxEqual_11(*args, **kwargs)#
+

Version

+

Onnx name: Equal

+

This version of the operator has been available since +version 11.

+

Summary

+

Returns the tensor resulted from performing the equal logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input types to all numeric tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxEqual_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxEqual_13(*args, **kwargs)#
+

Version

+

Onnx name: Equal

+

This version of the operator has been available since +version 13.

+

Summary

+

Returns the tensor resulted from performing the equal logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to all numeric tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxEqual_19#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxEqual_19(*args, **kwargs)#
+

Version

+

Onnx name: Equal

+

This version of the operator has been available since +version 19.

+

Summary

+

Returns the tensor resulted from performing the equal logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(string): Constrain input types to all (non-complex) tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxEqual_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxEqual_7(*args, **kwargs)#
+

Version

+

Onnx name: Equal

+

This version of the operator has been available since +version 7.

+

Summary

+

Returns the tensor resulted from performing the equal logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool), tensor(int32), tensor(int64): Constrain input to integral tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxErf#

class skl2onnx.algebra.onnx_ops.OnnxErf(*args, **kwargs)#

Version

Onnx name: Erf

This version of the operator has been available since version 13.

Summary

Computes the error function of the given input tensor element-wise.

Inputs

  • input (heterogeneous)T: Input tensor

Outputs

  • output (heterogeneous)T: The error function of the input tensor computed element-wise. It has the same shape and type as the input.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

OnnxErf_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxErf_13(*args, **kwargs)#
+

Version

+

Onnx name: Erf

+

This version of the operator has been available since +version 13.

+

Summary

+

Computes the error function of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The error function of the input tensor computed element-wise. It has the same shape and type of the input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxErf_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxErf_9(*args, **kwargs)#
+

Version

+

Onnx name: Erf

+

This version of the operator has been available since +version 9.

+

Summary

+

Computes the error function of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The error function of the input tensor computed element-wise. It has the same shape and type of the input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxExp#

class skl2onnx.algebra.onnx_ops.OnnxExp(*args, **kwargs)#

Version

Onnx name: Exp

This version of the operator has been available since version 13.

Summary

Calculates the exponential of the given input tensor, element-wise.

Inputs

  • input (heterogeneous)T: Input tensor

Outputs

  • output (heterogeneous)T: The exponential of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.
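These Onnx* wrapper classes are typically instantiated with input names (or other Onnx* operators) and then converted to a model. A minimal sketch following the usage pattern from skl2onnx's examples; it assumes onnxruntime is installed and the variable names are arbitrary.

    import numpy as np
    from skl2onnx.algebra.onnx_ops import OnnxExp

    # Build a one-node graph Y = Exp(X) using the opset-13 version of the operator.
    node = OnnxExp('X', op_version=13, output_names=['Y'])
    x = np.array([[0.0, 1.0, 2.0]], dtype=np.float32)
    onx = node.to_onnx({'X': x}, target_opset=13)

    # Optional check with onnxruntime.
    import onnxruntime as rt
    sess = rt.InferenceSession(onx.SerializeToString(),
                               providers=['CPUExecutionProvider'])
    print(sess.run(None, {'X': x})[0])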

OnnxExp_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxExp_1(*args, **kwargs)#
+

Version

+

Onnx name: Exp

+

This version of the operator has been available since +version 1.

+

Summary

+

Calculates the exponential of the given input tensor, element-wise.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The exponential of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxExp_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxExp_13(*args, **kwargs)#
+

Version

+

Onnx name: Exp

+

This version of the operator has been available since +version 13.

+

Summary

+

Calculates the exponential of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The exponential of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxExp_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxExp_6(*args, **kwargs)#
+

Version

+

Onnx name: Exp

+

This version of the operator has been available since +version 6.

+

Summary

+

Calculates the exponential of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The exponential of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxExpand#

class skl2onnx.algebra.onnx_ops.OnnxExpand(*args, **kwargs)#

Version

Onnx name: Expand

This version of the operator has been available since version 13.

Summary

Broadcast the input tensor following the given shape and the broadcast rule. The broadcast rule is similar to numpy.array(input) * numpy.ones(shape): dimensions are right-aligned, and two corresponding dimensions must either have the same value or one of them must be equal to 1. This operator is also similar to numpy.broadcast_to(input, shape), but the major difference is that numpy.broadcast_to() does not allow shape to be smaller than input.size(). It is possible that output.shape is not equal to shape when some dimensions in shape are equal to 1, or when shape.ndim < input.shape.ndim.

Inputs

  • input (heterogeneous)T: Input tensor

  • shape (heterogeneous)tensor(int64): A 1-D tensor that indicates the shape you want to expand to, following the broadcast rule

Outputs

  • output (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensors.
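The numpy.ones(shape) trick mentioned above gives a compact reference for the behaviour, including the case where the requested shape has fewer dimensions than the input. A sketch, not the runtime implementation.

    import numpy as np

    data = np.arange(3, dtype=np.float32).reshape(3, 1)     # shape (3, 1)

    # Expand to (2, 3, 4): multiply by ones of the target shape.
    expanded = data * np.ones((2, 3, 4), dtype=np.float32)
    print(expanded.shape)                                    # (2, 3, 4)

    # shape smaller than the input rank: (1,) just broadcasts against (3, 1),
    # so the output keeps shape (3, 1) rather than the requested (1,).
    print((data * np.ones((1,), dtype=np.float32)).shape)    # (3, 1)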

OnnxExpand_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxExpand_13(*args, **kwargs)#
+

Version

+

Onnx name: Expand

+

This version of the operator has been available since +version 13.

+

Summary

+

Broadcast the input tensor following the given shape and the broadcast rule. +The broadcast rule is similar to numpy.array(input) * numpy.ones(shape): +Dimensions are right alignment; +Two corresponding dimensions must have the same value, or one of them is equal to 1. +Also, this operator is similar to numpy.broadcast_to(input, shape), +but the major difference is numpy.broadcast_to() does not allow shape to be smaller than input.size(). +It is possible that the output.shape is not equal to shape, when some dimensions in shape is equal to 1, +or the shape.ndim < input.shape.ndim.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
  • shape (heterogeneous)tensor(int64): A 1-D tensor indicates the shape you want to expand to, following the broadcast rule

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensors.

  • +
+
+ +
+
+
+
+

OnnxExpand_8#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxExpand_8(*args, **kwargs)#
+

Version

+

Onnx name: Expand

+

This version of the operator has been available since +version 8.

+

Summary

+

Broadcast the input tensor following the given shape and the broadcast rule. +The broadcast rule is similar to numpy.array(input) * numpy.ones(shape): +Dimensions are right alignment; +Two corresponding dimensions must have the same value, or one of them is equal to 1. +Also, this operator is similar to numpy.broadcast_to(input, shape), +but the major difference is numpy.broadcast_to() does not allow shape to be smaller than input.size(). +It is possible that the output.shape is not equal to shape, when some dimensions in shape is equal to 1, +or the shape.ndim < input.shape.ndim.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
  • shape (heterogeneous)tensor(int64): A 1-D tensor indicates the shape you want to expand to, following the broadcast rule

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensors.

  • +
+
+ +
+
+
+
+

OnnxEyeLike#

class skl2onnx.algebra.onnx_ops.OnnxEyeLike(*args, **kwargs)#

Version

Onnx name: EyeLike

This version of the operator has been available since version 9.

Summary

Generate a 2D tensor (matrix) with ones on the diagonal and zeros everywhere else. Only 2D tensors are supported, i.e. input T1 must be of rank 2. The shape of the output tensor is the same as the input tensor. The data type can be specified by the ‘dtype’ argument. If ‘dtype’ is not specified, then the type of the input tensor is used. By default, the main diagonal is populated with ones, but attribute ‘k’ can be used to populate upper or lower diagonals. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message and be valid as an output type.

Attributes

  • k: (Optional) Index of the diagonal to be populated with ones. Default is 0. If T2 is the output, this op sets T2[i, i+k] = 1. k = 0 populates the main diagonal, k > 0 populates an upper diagonal, and k < 0 populates a lower diagonal. Default value is name: "k" i: 0 type: INT

Inputs

  • input (heterogeneous)T1: 2D input tensor to copy shape, and optionally, type information from.

Outputs

  • output (heterogeneous)T2: Output tensor, same shape as input tensor T1.

Type Constraints

  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain input types. Strings and complex are not supported.

  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain output types. Strings and complex are not supported.
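numpy.eye mirrors the semantics of the ‘k’ attribute, which makes the behaviour easy to illustrate (a reference sketch only; eyelike_reference is a made-up helper name):

    import numpy as np

    def eyelike_reference(x, k=0, dtype=None):
        # Output has the same (2-D) shape as the input; dtype defaults to the input's.
        rows, cols = x.shape
        return np.eye(rows, cols, k=k, dtype=dtype or x.dtype)

    x = np.zeros((3, 5), dtype=np.int64)
    print(eyelike_reference(x, k=1))     # ones on the first upper diagonal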

OnnxEyeLike_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxEyeLike_9(*args, **kwargs)#
+

Version

+

Onnx name: EyeLike

+

This version of the operator has been available since +version 9.

+

Summary

+

Generate a 2D tensor (matrix) with ones on the diagonal and zeros everywhere else. Only 2D +tensors are supported, i.e. input T1 must be of rank 2. The shape of the output tensor is the +same as the input tensor. The data type can be specified by the ‘dtype’ argument. If +‘dtype’ is not specified, then the type of input tensor is used. By default, the main diagonal +is populated with ones, but attribute ‘k’ can be used to populate upper or lower diagonals. +The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the +TensorProto message and be valid as an output type.

+

Attributes

+
    +
  • +
  • k: (Optional) Index of the diagonal to be populated with ones. Default is 0. If T2 is the output, this op sets T2[i, i+k] = 1. k = 0 populates the main diagonal, k > 0 populates an upper diagonal, and k < 0 populates a lower diagonal. Default value is +name: "k" i: 0 type: INT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T1: 2D input tensor to copy shape, and optionally, type information from.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: Output tensor, same shape as input tensor T1.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain input types. Strings and complex are not supported.

  • +
  • T2 tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool): Constrain output types. Strings and complex are not supported.

  • +
+
+ +
+
+
+
+

OnnxFeatureVectorizer#

class skl2onnx.algebra.onnx_ops.OnnxFeatureVectorizer(*args, **kwargs)#

Version

Onnx name: FeatureVectorizer

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Concatenates input tensors into one continuous output.

All input shapes are 2-D and are concatenated along the second dimension. 1-D tensors are treated as [1,C]. Inputs are copied to the output maintaining the order of the input arguments.

All inputs must be integers or floats, while the output will be all floating point values.

Attributes

Inputs

Between 1 and 2147483647 inputs.

  • X (variadic, heterogeneous)T1: An ordered collection of tensors, all with the same element type.

Outputs

  • Y (heterogeneous)tensor(float): The output array, elements ordered as the inputs.

Type Constraints

  • T1 tensor(int32), tensor(int64), tensor(float), tensor(double): The input type must be a tensor of a numeric type.
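In NumPy terms the operator is essentially a cast-and-concatenate along the second dimension. An illustrative sketch; the operator's attribute describing the expected input dimensions is elided in the listing above and ignored here.

    import numpy as np

    def feature_vectorizer_reference(*tensors):
        # 1-D inputs are treated as a single row [1, C]; everything is cast to float.
        rows = [np.atleast_2d(t).astype(np.float32) for t in tensors]
        # Concatenate along the second dimension, preserving argument order.
        return np.concatenate(rows, axis=1)

    a = np.array([[1, 2]], dtype=np.int64)
    b = np.array([3.0, 4.0, 5.0])               # 1-D, treated as [1, 3]
    print(feature_vectorizer_reference(a, b))   # shape (1, 5), dtype float32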

OnnxFeatureVectorizer_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxFeatureVectorizer_1(*args, **kwargs)#
+

Version

+

Onnx name: FeatureVectorizer

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Concatenates input tensors into one continuous output.

+

All input shapes are 2-D and are concatenated along the second dimension. 1-D tensors are treated as [1,C]. Inputs are copied to the output maintaining the order of the input arguments.

+

All inputs must be integers or floats, while the output will be all floating point values.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • X (variadic, heterogeneous)T1: An ordered collection of tensors, all with the same element type.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(float): The output array, elements ordered as the inputs.

  • +
+

Type Constraints

+
    +
  • T1 tensor(int32), tensor(int64), tensor(float), tensor(double): The input type must be a tensor of a numeric type.

  • +
+
+ +
+
+
+
+

OnnxFlatten#

class skl2onnx.algebra.onnx_ops.OnnxFlatten(*args, **kwargs)#

Version

Onnx name: Flatten

This version of the operator has been available since version 13.

Summary

Flattens the input tensor into a 2D matrix. If the input tensor has shape (d_0, d_1, … d_n) then the output will have shape (d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X d_n).

Attributes

  • axis: Indicates up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor. A negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 … d_n)), where the shape of the input tensor is (d_0, d_1, … d_n). Default value is name: "axis" i: 1 type: INT

Inputs

  • input (heterogeneous)T: A tensor of rank >= axis.

Outputs

  • output (heterogeneous)T: A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and the remaining input dimensions flattened into the inner dimension of the output.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output to all tensor types.
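The output-shape rule reduces to a single reshape. A reference sketch for the axis semantics, including negative axis values:

    import numpy as np

    def flatten_reference(x, axis=1):
        # Normalise a negative axis, then split the shape at `axis`.
        axis = axis if axis >= 0 else axis + x.ndim
        outer = int(np.prod(x.shape[:axis], dtype=np.int64))   # d_0 * ... * d_(axis-1)
        return x.reshape(outer, -1)                            # remaining dims collapse

    x = np.zeros((2, 3, 4, 5), dtype=np.float32)
    print(flatten_reference(x, axis=2).shape)    # (6, 20)
    print(flatten_reference(x, axis=0).shape)    # (1, 120)
    print(flatten_reference(x, axis=-1).shape)   # (24, 5)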

OnnxFlatten_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxFlatten_1(*args, **kwargs)#
+

Version

+

Onnx name: Flatten

+

This version of the operator has been available since +version 1.

+

Summary

+

Flattens the input tensor into a 2D matrix. If input tensor has shape +(d_0, d_1, … d_n) then the output will have shape +(d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X dn).

+

Attributes

+
    +
  • axis: Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [0, R], where R is the rank of the input tensor. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 … d_n), where the shape of the input tensor is (d_0, d_1, … d_n). Default value is +name: "axis" i: 1 type: INT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: A tensor of rank >= axis.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxFlatten_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxFlatten_11(*args, **kwargs)#
+

Version

+

Onnx name: Flatten

+

This version of the operator has been available since +version 11.

+

Summary

+

Flattens the input tensor into a 2D matrix. If input tensor has shape +(d_0, d_1, … d_n) then the output will have shape +(d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X dn).

+

Attributes

+
    +
  • axis: Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor. Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 … d_n), where the shape of the input tensor is (d_0, d_1, … d_n). Default value is +name: "axis" i: 1 type: INT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: A tensor of rank >= axis.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxFlatten_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxFlatten_13(*args, **kwargs)#
+

Version

+

Onnx name: Flatten

+

This version of the operator has been available since +version 13.

+

Summary

+

Flattens the input tensor into a 2D matrix. If input tensor has shape +(d_0, d_1, … d_n) then the output will have shape +(d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X dn).

+

Attributes

+
    +
  • axis: Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor. Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 … d_n), where the shape of the input tensor is (d_0, d_1, … d_n). Default value is +name: "axis" i: 1 type: INT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: A tensor of rank >= axis.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxFlatten_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxFlatten_9(*args, **kwargs)#
+

Version

+

Onnx name: Flatten

+

This version of the operator has been available since +version 9.

+

Summary

+

Flattens the input tensor into a 2D matrix. If input tensor has shape +(d_0, d_1, … d_n) then the output will have shape +(d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X dn).

+

Attributes

+
    +
  • axis: Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [0, R], where R is the rank of the input tensor. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 … d_n), where the shape of the input tensor is (d_0, d_1, … d_n). Default value is +name: "axis" i: 1 type: INT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: A tensor of rank >= axis.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxFloor#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxFloor(*args, **kwargs)#
+

Version

+

Onnx name: Floor

+

This version of the operator has been available since +version 13.

+

Summary

+

Floor takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the floor is, y = floor(x), is applied to +the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxFloor_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxFloor_1(*args, **kwargs)#
+

Version

+

Onnx name: Floor

+

This version of the operator has been available since +version 1.

+

Summary

+

Floor takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the floor is, y = floor(x), is applied to +the tensor elementwise.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxFloor_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxFloor_13(*args, **kwargs)#
+

Version

+

Onnx name: Floor

+

This version of the operator has been available since +version 13.

+

Summary

+

Floor takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the floor is, y = floor(x), is applied to +the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxFloor_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxFloor_6(*args, **kwargs)#
+

Version

+

Onnx name: Floor

+

This version of the operator has been available since +version 6.

+

Summary

+

Floor takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the floor is, y = floor(x), is applied to +the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxGRU#

class skl2onnx.algebra.onnx_ops.OnnxGRU(*args, **kwargs)#

Version

Onnx name: GRU

This version of the operator has been available since version 14.

Summary

Computes a one-layer GRU. This operator is usually supported via some custom implementation such as CuDNN.

+

Notations:

+
    +
  • X - input tensor

  • +
  • z - update gate

  • +
  • r - reset gate

  • +
  • h - hidden gate

  • +
  • t - time step (t-1 means previous time step)

  • +
  • W[zrh] - W parameter weight matrix for update, reset, and hidden gates

  • +
  • R[zrh] - R recurrence weight matrix for update, reset, and hidden gates

  • +
  • Wb[zrh] - W bias vectors for update, reset, and hidden gates

  • +
  • Rb[zrh] - R bias vectors for update, reset, and hidden gates

  • +
  • WB[zrh] - W parameter weight matrix for backward update, reset, and hidden gates

  • +
  • RB[zrh] - R recurrence weight matrix for backward update, reset, and hidden gates

  • +
  • WBb[zrh] - W bias vectors for backward update, reset, and hidden gates

  • +
  • RBb[zrh] - R bias vectors for backward update, reset, and hidden gates

  • +
  • H - Hidden state

  • +
  • num_directions - 2 if direction == bidirectional else 1

  • +
+

Activation functions:

+
    +
  • Relu(x) - max(0, x)

  • +
  • Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

  • +
  • Sigmoid(x) - 1/(1 + e^{-x})

  • +
+
NOTE: the following activation functions are optional.
    +
  • Affine(x) - alpha * x + beta

  • +
  • LeakyRelu(x) - x if x >= 0 else alpha * x

  • +
  • ThresholdedRelu(x) - x if x >= alpha else 0

  • +
  • ScaledTanh(x) - alpha * Tanh(beta * x)

  • +
  • HardSigmoid(x) - min(max(alpha * x + beta, 0), 1)

  • +
  • Elu(x) - x if x >= 0 else alpha * (e^x - 1)

  • +
  • Softsign(x) - x/(1 + |x|)

  • +
  • Softplus(x) - log(1 + e^x)

  • +
+

Equations (Default: f=Sigmoid, g=Tanh):

+
    +
  • zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)

  • +
  • rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)

  • +
  • ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0

  • +
  • ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0

  • +
  • Ht = (1 - zt) (.) ht + zt (.) Ht-1

  • +
+

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.
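As a sanity check, the default equations above (f = Sigmoid, g = Tanh, linear_before_reset = 0) for a single forward time step can be written out in NumPy. This is an illustrative sketch with made-up helper names, not the runtime kernel; the weights follow the [z, r, h] row packing described in the notations.

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def gru_cell_step(x_t, h_prev, W, R, Wb, Rb):
        """One GRU step. W has shape [3*hidden, input], R has shape
        [3*hidden, hidden]; rows are packed in [z, r, h] order."""
        Wz, Wr, Wh = np.split(W, 3)
        Rz, Rr, Rh = np.split(R, 3)
        Wbz, Wbr, Wbh = np.split(Wb, 3)
        Rbz, Rbr, Rbh = np.split(Rb, 3)
        z = sigmoid(x_t @ Wz.T + h_prev @ Rz.T + Wbz + Rbz)        # update gate
        r = sigmoid(x_t @ Wr.T + h_prev @ Rr.T + Wbr + Rbr)        # reset gate
        h_tilde = np.tanh(x_t @ Wh.T + (r * h_prev) @ Rh.T + Rbh + Wbh)
        return (1.0 - z) * h_tilde + z * h_prev                    # Ht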

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is +name: "direction" s: "forward" type: STRING

  • +
  • +
  • layout: The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size]. Default value is +name: "layout" i: 0 type: INT

  • +
  • linear_before_reset: When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate. Default value is +name: "linear_before_reset" i: 0 type: INT

  • +
+

Inputs

+

Between 3 and 6 inputs.

+
    +
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • +
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[zrh] and WB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, input_size].

  • +
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[zrh] and RB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, hidden_size].

  • +
  • B (optional, heterogeneous)T: The bias tensor for the gates. Concatenation of [Wb[zrh], Rb[zrh]] and [WBb[zrh], RBb[zrh]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 6*hidden_size]. Optional: If not specified - assumed to be 0

  • +
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • +
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Outputs

+

Between 0 and 2 outputs.

+
    +
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • +
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • +
+
+ +
+
+
+
+

OnnxGRU_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGRU_1(*args, **kwargs)#
+

Version

+

Onnx name: GRU

+

This version of the operator has been available since +version 1.

+

Summary

+

Computes a one-layer GRU. This operator is usually supported via some custom implementation such as CuDNN.

+

Notations:

+

X - input tensor

+

z - update gate

+

r - reset gate

+

h - hidden gate

+

t - time step (t-1 means previous time step)

+

W[zrh] - W parameter weight matrix for update, reset, and hidden gates

+

R[zrh] - R recurrence weight matrix for update, reset, and hidden gates

+

Wb[zrh] - W bias vectors for update, reset, and hidden gates

+

Rb[zrh] - R bias vectors for update, reset, and hidden gates

+

WB[zrh] - W parameter weight matrix for backward update, reset, and hidden gates

+

RB[zrh] - R recurrence weight matrix for backward update, reset, and hidden gates

+

WBb[zrh] - W bias vectors for backward update, reset, and hidden gates

+

RBb[zrh] - R bias vectors for backward update, reset, and hidden gates

+

H - Hidden state

+

num_directions - 2 if direction == bidirectional else 1

+

Activation functions:

+
+

Relu(x) - max(0, x)

+

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

+

Sigmoid(x) - 1/(1 + e^{-x})

+

(NOTE: Below are optional)

+

Affine(x) - alpha*x + beta

+

LeakyRelu(x) - x if x >= 0 else alpha * x

+

ThresholdedRelu(x) - x if x >= alpha else 0

+

ScaledTanh(x) - alpha*Tanh(beta*x)

+

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

+

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

+

Softsign(x) - x/(1 + |x|)

+

Softplus(x) - log(1 + e^x)

+
+

Equations (Default: f=Sigmoid, g=Tanh):

  • zt = f(Xt*(Wz^T) + Ht-1*Rz + Wbz + Rbz)

  • rt = f(Xt*(Wr^T) + Ht-1*Rr + Wbr + Rbr)

  • ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*Rh + Rbh + Wbh) # default, when linear_before_reset = 0

  • ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*Rh + Rbh)) + Wbh) # when linear_before_reset != 0

  • Ht = (1 - zt) (.) ht + zt (.) Ht-1

Attributes

+
    +
  • +
  • +
  • +
  • +
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is name: "direction" s: "forward" type: STRING

  • +
  • +
  • output_sequence: The sequence output for the hidden is optional if 0. Default 0. Default value is +name: "output_sequence" i: 0 type: INT

  • +
+

Inputs

+

Between 3 and 6 inputs.

+
    +
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • +
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[zrh] and WB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, input_size].

  • +
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[zrh] and RB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, hidden_size].

  • +
  • B (optional, heterogeneous)T: The bias tensor for the gates. Concatenation of [Wb[zrh], Rb[zrh]] and [WBb[zrh], RBb[zrh]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 6*hidden_size]. Optional: If not specified - assumed to be 0

  • +
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • +
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Outputs

+
    +
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size]. It is optional if output_sequence is 0.

  • +
  • Y_h (heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • +
+
+ +
+
+
+
+

OnnxGRU_14#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGRU_14(*args, **kwargs)#
+

Version

+

Onnx name: GRU

+

This version of the operator has been available since +version 14.

+

Summary

+

Computes a one-layer GRU. This operator is usually supported via some custom implementation such as CuDNN.

+

Notations:

+
  • X - input tensor

  • z - update gate

  • r - reset gate

  • h - hidden gate

  • t - time step (t-1 means previous time step)

  • W[zrh] - W parameter weight matrix for update, reset, and hidden gates

  • R[zrh] - R recurrence weight matrix for update, reset, and hidden gates

  • Wb[zrh] - W bias vectors for update, reset, and hidden gates

  • Rb[zrh] - R bias vectors for update, reset, and hidden gates

  • WB[zrh] - W parameter weight matrix for backward update, reset, and hidden gates

  • RB[zrh] - R recurrence weight matrix for backward update, reset, and hidden gates

  • WBb[zrh] - W bias vectors for backward update, reset, and hidden gates

  • RBb[zrh] - R bias vectors for backward update, reset, and hidden gates

  • H - Hidden state

  • num_directions - 2 if direction == bidirectional else 1

Activation functions:

  • Relu(x) - max(0, x)

  • Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

  • Sigmoid(x) - 1/(1 + e^{-x})

NOTE: Below are optional

  • Affine(x) - alpha * x + beta

  • LeakyRelu(x) - x if x >= 0 else alpha * x

  • ThresholdedRelu(x) - x if x >= alpha else 0

  • ScaledTanh(x) - alpha * Tanh(beta * x)

  • HardSigmoid(x) - min(max(alpha * x + beta, 0), 1)

  • Elu(x) - x if x >= 0 else alpha * (e^x - 1)

  • Softsign(x) - x/(1 + |x|)

  • Softplus(x) - log(1 + e^x)

Equations (Default: f=Sigmoid, g=Tanh):

  • zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)

  • rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)

  • ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0

  • ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0

  • Ht = (1 - zt) (.) ht + zt (.) Ht-1

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is +name: "direction" s: "forward" type: STRING

  • +
  • +
  • layout: The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size]. Default value is +name: "layout" i: 0 type: INT

  • +
  • linear_before_reset: When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate. Default value is +name: "linear_before_reset" i: 0 type: INT

  • +
+

Inputs

+

Between 3 and 6 inputs.

+
    +
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • +
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[zrh] and WB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, input_size].

  • +
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[zrh] and RB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, hidden_size].

  • +
  • B (optional, heterogeneous)T: The bias tensor for the gates. Concatenation of [Wb[zrh], Rb[zrh]] and [WBb[zrh], RBb[zrh]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 6*hidden_size]. Optional: If not specified - assumed to be 0

  • +
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • +
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Outputs

+

Between 0 and 2 outputs.

+
    +
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • +
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • +
+
+ +
+
+
+
+

OnnxGRU_3#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGRU_3(*args, **kwargs)#
+

Version

+

Onnx name: GRU

+

This version of the operator has been available since +version 3.

+

Summary

+

Computes a one-layer GRU. This operator is usually supported via some custom implementation such as CuDNN.

+

Notations:

+

X - input tensor

+

z - update gate

+

r - reset gate

+

h - hidden gate

+

t - time step (t-1 means previous time step)

+

W[zrh] - W parameter weight matrix for update, reset, and hidden gates

+

R[zrh] - R recurrence weight matrix for update, reset, and hidden gates

+

Wb[zrh] - W bias vectors for update, reset, and hidden gates

+

Rb[zrh] - R bias vectors for update, reset, and hidden gates

+

WB[zrh] - W parameter weight matrix for backward update, reset, and hidden gates

+

RB[zrh] - R recurrence weight matrix for backward update, reset, and hidden gates

+

WBb[zrh] - W bias vectors for backward update, reset, and hidden gates

+

RBb[zrh] - R bias vectors for backward update, reset, and hidden gates

+

H - Hidden state

+

num_directions - 2 if direction == bidirectional else 1

+

Activation functions:

+
+

Relu(x) - max(0, x)

+

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

+

Sigmoid(x) - 1/(1 + e^{-x})

+

(NOTE: Below are optional)

+

Affine(x) - alpha*x + beta

+

LeakyRelu(x) - x if x >= 0 else alpha * x

+

ThresholdedRelu(x) - x if x >= alpha else 0

+

ScaledTanh(x) - alpha*Tanh(beta*x)

+

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

+

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

+

Softsign(x) - x/(1 + |x|)

+

Softplus(x) - log(1 + e^x)

+
+

Equations (Default: f=Sigmoid, g=Tanh):

  • zt = f(Xt*(Wz^T) + Ht-1*Rz + Wbz + Rbz)

  • rt = f(Xt*(Wr^T) + Ht-1*Rr + Wbr + Rbr)

  • ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*Rh + Rbh + Wbh) # default, when linear_before_reset = 0

  • ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*Rh + Rbh)) + Wbh) # when linear_before_reset != 0

  • Ht = (1 - zt) (.) ht + zt (.) Ht-1

Attributes

+
    +
  • +
  • +
  • +
  • +
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is +name: "direction" s: "forward" type: STRING

  • +
  • +
  • linear_before_reset: When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate. Default value is +name: "linear_before_reset" i: 0 type: INT

  • +
  • output_sequence: The sequence output for the hidden is optional if 0. Default 0. Default value is +name: "output_sequence" i: 0 type: INT

  • +
+

Inputs

+

Between 3 and 6 inputs.

+
    +
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • +
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[zrh] and WB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, input_size].

  • +
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[zrh] and RB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, hidden_size].

  • +
  • B (optional, heterogeneous)T: The bias tensor for the gates. Concatenation of [Wb[zrh], Rb[zrh]] and [WBb[zrh], RBb[zrh]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 6*hidden_size]. Optional: If not specified - assumed to be 0

  • +
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • +
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Outputs

+

Between 0 and 2 outputs.

+
    +
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size]. It is optional if output_sequence is 0.

  • +
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • +
+
+ +
+
+
+
+

OnnxGRU_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGRU_7(*args, **kwargs)#
+

Version

+

Onnx name: GRU

+

This version of the operator has been available since +version 7.

+

Summary

+

Computes a one-layer GRU. This operator is usually supported via some custom implementation such as CuDNN.

+

Notations:

+

X - input tensor

+

z - update gate

+

r - reset gate

+

h - hidden gate

+

t - time step (t-1 means previous time step)

+

W[zrh] - W parameter weight matrix for update, reset, and hidden gates

+

R[zrh] - R recurrence weight matrix for update, reset, and hidden gates

+

Wb[zrh] - W bias vectors for update, reset, and hidden gates

+

Rb[zrh] - R bias vectors for update, reset, and hidden gates

+

WB[zrh] - W parameter weight matrix for backward update, reset, and hidden gates

+

RB[zrh] - R recurrence weight matrix for backward update, reset, and hidden gates

+

WBb[zrh] - W bias vectors for backward update, reset, and hidden gates

+

RBb[zrh] - R bias vectors for backward update, reset, and hidden gates

+

H - Hidden state

+

num_directions - 2 if direction == bidirectional else 1

+

Activation functions:

+
+

Relu(x) - max(0, x)

+

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

+

Sigmoid(x) - 1/(1 + e^{-x})

+

(NOTE: Below are optional)

+

Affine(x) - alpha*x + beta

+

LeakyRelu(x) - x if x >= 0 else alpha * x

+

ThresholdedRelu(x) - x if x >= alpha else 0

+

ScaledTanh(x) - alpha*Tanh(beta*x)

+

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

+

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

+

Softsign(x) - x/(1 + |x|)

+

Softplus(x) - log(1 + e^x)

+
+

Equations (Default: f=Sigmoid, g=Tanh):

  • zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)

  • rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)

  • ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0

  • ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0

  • Ht = (1 - zt) (.) ht + zt (.) Ht-1

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is +name: "direction" s: "forward" type: STRING

  • +
  • +
  • linear_before_reset: When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate. Default value is +name: "linear_before_reset" i: 0 type: INT

  • +
+

Inputs

+

Between 3 and 6 inputs.

+
    +
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • +
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[zrh] and WB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, input_size].

  • +
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[zrh] and RB[zrh] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 3*hidden_size, hidden_size].

  • +
  • B (optional, heterogeneous)T: The bias tensor for the gates. Concatenation of [Wb[zrh], Rb[zrh]] and [WBb[zrh], RBb[zrh]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 6*hidden_size]. Optional: If not specified - assumed to be 0

  • +
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • +
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Outputs

+

Between 0 and 2 outputs.

+
    +
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • +
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • +
+
+ +
+
+
+
+

OnnxGather#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGather(*args, **kwargs)#
+

Version

+

Onnx name: Gather

+

This version of the operator has been available since +version 13.

+

Summary

+

Given data tensor of rank r >= 1, and indices tensor of rank q, gather +entries of the axis dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates +them in an output tensor of rank q + (r - 1).

+

If axis = 0, let k = indices[i_{0}, …, i_{q-1}] +then output[i_{0}, …, i_{q-1}, j_{0}, …, j_{r-2}] = input[k , j_{0}, …, j_{r-2}]:

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+indices = [
+    [0, 1],
+    [1, 2],
+]
+output = [
+    [
+        [1.0, 1.2],
+        [2.3, 3.4],
+    ],
+    [
+        [2.3, 3.4],
+        [4.5, 5.7],
+    ],
+]
+
+
+

If axis = 1, let k = indices[i_{0}, …, i_{q-1}] +then output[j_{0}, i_{0}, …, i_{q-1}, j_{1}, …, j_{r-2}] = input[j_{0}, k, j_{1}, …, j_{r-2}]:

+
data = [
+    [1.0, 1.2, 1.9],
+    [2.3, 3.4, 3.9],
+    [4.5, 5.7, 5.9],
+]
+indices = [
+    [0, 2],
+]
+axis = 1,
+output = [
+        [[1.0, 1.9]],
+        [[2.3, 3.9]],
+        [[4.5, 5.9]],
+]
+
+
+
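For intuition, Gather with a fixed axis behaves like numpy.take along that axis; the following NumPy lines reproduce the two examples above (an illustration, not the runtime implementation):

import numpy as np

data = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]])
indices = np.array([[0, 1], [1, 2]])
# axis = 0: output[i0, i1, :] = data[indices[i0, i1], :]
print(np.take(data, indices, axis=0))    # shape (2, 2, 2), matches the first example

data2 = np.array([[1.0, 1.2, 1.9], [2.3, 3.4, 3.9], [4.5, 5.7, 5.9]])
indices2 = np.array([[0, 2]])
# axis = 1: gathers columns 0 and 2 of every row
print(np.take(data2, indices2, axis=1))  # shape (3, 1, 2), matches the second example

To build a small ONNX graph with the class documented here, a typical use of the algebra API looks like the sketch below; the input name "X", output name "Y" and the shapes are arbitrary choices, op_version should match the opset you target, and exact keyword support may vary between skl2onnx versions:

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxGather

node = OnnxGather(
    "X", np.array([0, 2], dtype=np.int64),
    axis=1, op_version=13, output_names=["Y"],
)
onx = node.to_onnx({"X": np.zeros((3, 3), dtype=np.float32)})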

Attributes

+
    +
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of any rank q. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank q + (r - 1).

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxGatherElements#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGatherElements(*args, **kwargs)#
+

Version

+

Onnx name: GatherElements

+

This version of the operator has been available since +version 13.

+

Summary

+

GatherElements takes two inputs data and indices of the same rank r >= 1 +and an optional attribute axis that identifies an axis of data +(by default, the outer-most axis, that is axis 0). It is an indexing operation +that produces its output by indexing into the input data tensor at index +positions determined by elements of the indices tensor. +Its output shape is the same as the shape of indices and consists of one value +(gathered from the data) for each element in indices.

+

For instance, in the 3-D case (r = 3), the output produced is determined +by the following equations:

+
out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,
+out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,
+out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,
+
+
+

This operator is also the inverse of ScatterElements. It is similar to Torch’s gather operation.

+

Example 1:

+
data = [
+    [1, 2],
+    [3, 4],
+]
+indices = [
+    [0, 0],
+    [1, 0],
+]
+axis = 1
+output = [
+    [1, 1],
+    [4, 3],
+]
+
+
+

Example 2:

+
data = [
+    [1, 2, 3],
+    [4, 5, 6],
+    [7, 8, 9],
+]
+indices = [
+    [1, 2, 0],
+    [2, 0, 0],
+]
+axis = 0
+output = [
+    [4, 8, 3],
+    [7, 2, 3],
+]
+
+
+
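Both examples can be reproduced with numpy.take_along_axis, which performs the same element-wise indexing (a NumPy illustration only):

import numpy as np

data = np.array([[1, 2], [3, 4]])
indices = np.array([[0, 0], [1, 0]])
print(np.take_along_axis(data, indices, axis=1))    # [[1 1], [4 3]]

data2 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
indices2 = np.array([[1, 2, 0], [2, 0, 0]])
print(np.take_along_axis(data2, indices2, axis=0))  # [[4 8 3], [7 2 3]]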

Attributes

+
    +
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, with the same rank r as the input. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of the same shape as indices.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxGatherElements_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGatherElements_11(*args, **kwargs)#
+

Version

+

Onnx name: GatherElements

+

This version of the operator has been available since +version 11.

+

Summary

+

GatherElements takes two inputs data and indices of the same rank r >= 1 +and an optional attribute axis that identifies an axis of data +(by default, the outer-most axis, that is axis 0). It is an indexing operation +that produces its output by indexing into the input data tensor at index +positions determined by elements of the indices tensor. +Its output shape is the same as the shape of indices and consists of one value +(gathered from the data) for each element in indices.

+

For instance, in the 3-D case (r = 3), the output produced is determined +by the following equations:

+
out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,
+out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,
+out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,
+
+
+

This operator is also the inverse of ScatterElements. It is similar to Torch’s gather operation.

+

Example 1:

+
data = [
    [1, 2],
    [3, 4],
]
indices = [
    [0, 0],
    [1, 0],
]
axis = 1
output = [
    [1, 1],
    [4, 3],
]
+
+
+

Example 2:

+
data = [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9],
]
indices = [
    [1, 2, 0],
    [2, 0, 0],
]
axis = 0
output = [
    [4, 8, 3],
    [7, 2, 3],
]
+
+
+

Attributes

+
    +
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, with the same rank r as the input. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of the same shape as indices.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxGatherElements_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGatherElements_13(*args, **kwargs)#
+

Version

+

Onnx name: GatherElements

+

This version of the operator has been available since +version 13.

+

Summary

+

GatherElements takes two inputs data and indices of the same rank r >= 1 +and an optional attribute axis that identifies an axis of data +(by default, the outer-most axis, that is axis 0). It is an indexing operation +that produces its output by indexing into the input data tensor at index +positions determined by elements of the indices tensor. +Its output shape is the same as the shape of indices and consists of one value +(gathered from the data) for each element in indices.

+

For instance, in the 3-D case (r = 3), the output produced is determined +by the following equations:

+
out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,
+out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,
+out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,
+
+
+

This operator is also the inverse of ScatterElements. It is similar to Torch’s gather operation.

+

Example 1:

+
data = [
+    [1, 2],
+    [3, 4],
+]
+indices = [
+    [0, 0],
+    [1, 0],
+]
+axis = 1
+output = [
+    [1, 1],
+    [4, 3],
+]
+
+
+

Example 2:

+
data = [
+    [1, 2, 3],
+    [4, 5, 6],
+    [7, 8, 9],
+]
+indices = [
+    [1, 2, 0],
+    [2, 0, 0],
+]
+axis = 0
+output = [
+    [4, 8, 3],
+    [7, 2, 3],
+]
+
+
+

Attributes

+
    +
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, with the same rank r as the input. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of the same shape as indices.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxGatherND#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGatherND(*args, **kwargs)#
+

Version

+

Onnx name: GatherND

+

This version of the operator has been available since +version 13.

+

Summary

+

Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims integer b, this operator gathers +slices of data into an output tensor of rank q + r - indices_shape[-1] - 1 - b.

+

indices is a q-dimensional integer tensor, best thought of as a (q-1)-dimensional tensor of index-tuples into data, where each element defines a slice of data.

+

batch_dims (denoted as b) is an integer indicating the number of batch dimensions, i.e the leading b number of dimensions of +data tensor and indices are representing the batches, and the gather starts from the b+1 dimension.

+

Some salient points about the inputs’ rank and shape:

+
  1. r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks r and q.

  2. The first b dimensions of the shape of indices tensor and data tensor must be equal.

  3. b < min(q, r) is to be honored.

  4. The indices_shape[-1] should have a value between 1 (inclusive) and rank r-b (inclusive).

  5. All values in indices are expected to be within bounds [-s, s-1] along axis of size s, i.e. -data_shape[i] <= indices[…,i] <= data_shape[i] - 1. It is an error if any of the index values are out of bounds.

The output is computed as follows:

+

The output tensor is obtained by mapping each index-tuple in the indices tensor to the corresponding slice of the input data.

+
  1. If indices_shape[-1] > r-b => error condition.

  2. If indices_shape[-1] == r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensors containing 1-D tensors of dimension r-b, where N is the product of the elements in the batch dimensions of indices_shape (1 if there are none). Think of each such rank r-b tensor as indices_slice. Each scalar value corresponding to data[0:b-1, indices_slice] is filled into the corresponding location of the (q-b-1)-dimensional tensor to form the output tensor (Example 1 below).

  3. If indices_shape[-1] < r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensors containing 1-D tensors of dimension < r-b. Think of each such tensor as indices_slice. Each tensor slice corresponding to data[0:b-1, indices_slice, :] is filled into the corresponding location of the (q-b-1)-dimensional tensor to form the output tensor (Examples 2, 3, 4 and 5 below).

This operator is the inverse of ScatterND.

+

Example 1

+
+

batch_dims = 0

+

data = [[0,1],[2,3]] # data_shape = [2, 2]

+

indices = [[0,0],[1,1]] # indices_shape = [2, 2]

+

output = [0,3] # output_shape = [2]

+
+

Example 2

+
+

batch_dims = 0

+

data = [[0,1],[2,3]] # data_shape = [2, 2]

+

indices = [[1],[0]] # indices_shape = [2, 1]

+

output = [[2,3],[0,1]] # output_shape = [2, 2]

+
+

Example 3

+
+

batch_dims = 0

+

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

+

indices = [[0,1],[1,0]] # indices_shape = [2, 2]

+

output = [[2,3],[4,5]] # output_shape = [2, 2]

+
+

Example 4

+
+

batch_dims = 0

+

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

+

indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]

+

output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2]

+
+

Example 5

+
+

batch_dims = 1

+

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

+

indices = [[1],[0]] # indices_shape = [2, 1]

+

output = [[2,3],[4,5]] # output_shape = [2, 2]

+
+
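NumPy has no single primitive for GatherND, but for batch_dims = 0 it reduces to advanced indexing with the index-tuples stored along the last axis of indices. The helper below is a minimal sketch (illustrative only, batch_dims = 0) that reproduces Examples 1 and 2 above:

import numpy as np

def gather_nd(data, indices):
    # Each vector along the last axis of `indices` is an index-tuple
    # into the leading dimensions of `data` (batch_dims = 0 only).
    data = np.asarray(data)
    indices = np.asarray(indices)
    out_shape = indices.shape[:-1] + data.shape[indices.shape[-1]:]
    flat = indices.reshape(-1, indices.shape[-1])
    gathered = np.stack([data[tuple(idx)] for idx in flat])
    return gathered.reshape(out_shape)

print(gather_nd([[0, 1], [2, 3]], [[0, 0], [1, 1]]))  # [0 3]         (Example 1)
print(gather_nd([[0, 1], [2, 3]], [[1], [0]]))        # [[2 3] [0 1]] (Example 2)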

Attributes

+
    +
  • batch_dims: The number of batch dimensions. The gather of indexing starts from dimension of data[batch_dims:] Default value is +name: "batch_dims" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxGatherND_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGatherND_11(*args, **kwargs)#
+

Version

+

Onnx name: GatherND

+

This version of the operator has been available since +version 11.

+

Summary

+

Given data tensor of rank r >= 1, and indices tensor of rank q >= 1, this operator gathers +slices of data into an output tensor of rank q + r - indices_shape[-1] - 1.

+

indices is a q-dimensional integer tensor, best thought of as a (q-1)-dimensional tensor of index-tuples into data, where each element defines a slice of data.

+

Some salient points about the inputs’ rank and shape:

+
    +
  1. r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks r and q

  2. +
  3. The indices_shape[-1] should have a value between 1 (inclusive) and rank r (inclusive)

  4. +
  5. All values in indices are expected to be within bounds [-s, s-1] along axis of size s (i.e.) -data_shape[i] <= indices[…,i] <= data_shape[i] - 1. +It is an error if any of the index values are out of bounds.

  6. +
+

The output is computed as follows:

+

The output tensor is obtained by mapping each index-tuple in the indices tensor to the corresponding slice of the input data.

+
    +
  1. If indices_shape[-1] > r => error condition

  2. +
  3. If indices_shape[-1] == r, since the rank of indices is q, indices can be thought of as a (q-1)-dimensional tensor +containing 1-D tensors of dimension r. Let us think of each such r ranked tensor as indices_slice. +Each scalar value corresponding to data[indices_slice] is filled into the corresponding location of the (q-1)-dimensional tensor +to form the output tensor (Example 1 below)

  4. +
  5. If indices_shape[-1] < r, since the rank of indices is q, indices can be thought of as a (q-1)-dimensional tensor +containing 1-D tensors of dimension < r. Let us think of each such tensors as indices_slice. +Each tensor slice corresponding to data[indices_slice , :] is filled into the corresponding location of the (q-1)-dimensional tensor +to form the output tensor (Examples 2, 3, and 4 below)

  6. +
+

This operator is the inverse of ScatterND.

+

Example 1

+
+

data = [[0,1],[2,3]] # data_shape = [2, 2]

+

indices = [[0,0],[1,1]] # indices_shape = [2, 2]

+

output = [0,3] # output_shape = [2]

+
+

Example 2

+
+

data = [[0,1],[2,3]] # data_shape = [2, 2]

+

indices = [[1],[0]] # indices_shape = [2, 1]

+

output = [[2,3],[0,1]] # output_shape = [2, 2]

+
+

Example 3

+
+

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

+

indices = [[0,1],[1,0]] # indices_shape = [2, 2]

+

output = [[2,3],[4,5]] # output_shape = [2, 2]

+
+

Example 4

+
+

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

+

indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]

+

output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2]

+
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxGatherND_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGatherND_12(*args, **kwargs)#
+

Version

+

Onnx name: GatherND

+

This version of the operator has been available since +version 12.

+

Summary

+

Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims integer b, this operator gathers +slices of data into an output tensor of rank q + r - indices_shape[-1] - 1 - b.

+

indices is a q-dimensional integer tensor, best thought of as a (q-1)-dimensional tensor of index-tuples into data, where each element defines a slice of data.

+

batch_dims (denoted as b) is an integer indicating the number of batch dimensions, i.e the leading b number of dimensions of +data tensor and indices are representing the batches, and the gather starts from the b+1 dimension.

+

Some salient points about the inputs’ rank and shape:

+
    +
  1. r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks r and q

  2. +
  3. The first b dimensions of the shape of indices tensor and data tensor must be equal.

  4. +
  5. b < min(q, r) is to be honored.

  6. +
  7. The indices_shape[-1] should have a value between 1 (inclusive) and rank r-b (inclusive)

  8. +
  9. All values in indices are expected to be within bounds [-s, s-1] along axis of size s (i.e.) -data_shape[i] <= indices[…,i] <= data_shape[i] - 1. +It is an error if any of the index values are out of bounds.

  10. +
+

The output is computed as follows:

+

The output tensor is obtained by mapping each index-tuple in the indices tensor to the corresponding slice of the input data.

+
    +
  1. If indices_shape[-1] > r-b => error condition

  2. +
  3. If indices_shape[-1] == r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensors +containing 1-D tensors of dimension r-b, where N is an integer equals to the product of 1 and all the elements in the batch dimensions +of the indices_shape. Let us think of each such r-b ranked tensor as indices_slice. Each scalar value corresponding to data[0:b-1,indices_slice] +is filled into the corresponding location of the (q-b-1)-dimensional tensor to form the output tensor (Example 1 below)

  4. +
  5. If indices_shape[-1] < r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensor +containing 1-D tensors of dimension < r-b. Let us think of each such tensors as indices_slice. Each tensor slice corresponding +to data[0:b-1, indices_slice , :] is filled into the corresponding location of the (q-b-1)-dimensional tensor +to form the output tensor (Examples 2, 3, 4 and 5 below)

  6. +
+

This operator is the inverse of ScatterND.

+

Example 1

+
+

batch_dims = 0

+

data = [[0,1],[2,3]] # data_shape = [2, 2]

+

indices = [[0,0],[1,1]] # indices_shape = [2, 2]

+

output = [0,3] # output_shape = [2]

+
+

Example 2

+
+

batch_dims = 0

+

data = [[0,1],[2,3]] # data_shape = [2, 2]

+

indices = [[1],[0]] # indices_shape = [2, 1]

+

output = [[2,3],[0,1]] # output_shape = [2, 2]

+
+

Example 3

+
+

batch_dims = 0

+

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

+

indices = [[0,1],[1,0]] # indices_shape = [2, 2]

+

output = [[2,3],[4,5]] # output_shape = [2, 2]

+
+

Example 4

+
+

batch_dims = 0

+

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

+

indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]

+

output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2]

+
+

Example 5

+
+

batch_dims = 1

+

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

+

indices = [[1],[0]] # indices_shape = [2, 1]

+

output = [[2,3],[4,5]] # output_shape = [2, 2]

+
+

Attributes

+
    +
  • batch_dims: The number of batch dimensions. The gather of indexing starts from dimension of data[batch_dims:] Default value is +name: "batch_dims" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxGatherND_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGatherND_13(*args, **kwargs)#
+

Version

+

Onnx name: GatherND

+

This version of the operator has been available since +version 13.

+

Summary

+

Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims integer b, this operator gathers +slices of data into an output tensor of rank q + r - indices_shape[-1] - 1 - b.

+

indices is a q-dimensional integer tensor, best thought of as a (q-1)-dimensional tensor of index-tuples into data, where each element defines a slice of data.

+

batch_dims (denoted as b) is an integer indicating the number of batch dimensions, i.e the leading b number of dimensions of +data tensor and indices are representing the batches, and the gather starts from the b+1 dimension.

+

Some salient points about the inputs’ rank and shape:

+
    +
  1. r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks r and q

  2. +
  3. The first b dimensions of the shape of indices tensor and data tensor must be equal.

  4. +
  5. b < min(q, r) is to be honored.

  6. +
  7. The indices_shape[-1] should have a value between 1 (inclusive) and rank r-b (inclusive)

  8. +
  9. All values in indices are expected to be within bounds [-s, s-1] along axis of size s (i.e.) -data_shape[i] <= indices[…,i] <= data_shape[i] - 1. +It is an error if any of the index values are out of bounds.

  10. +
+

The output is computed as follows:

+

The output tensor is obtained by mapping each index-tuple in the indices tensor to the corresponding slice of the input data.

+
    +
  1. If indices_shape[-1] > r-b => error condition

  2. +
  3. If indices_shape[-1] == r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensors +containing 1-D tensors of dimension r-b, where N is an integer equals to the product of 1 and all the elements in the batch dimensions +of the indices_shape. Let us think of each such r-b ranked tensor as indices_slice. Each scalar value corresponding to data[0:b-1,indices_slice] +is filled into the corresponding location of the (q-b-1)-dimensional tensor to form the output tensor (Example 1 below)

  4. +
  5. If indices_shape[-1] < r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensor +containing 1-D tensors of dimension < r-b. Let us think of each such tensors as indices_slice. Each tensor slice corresponding +to data[0:b-1, indices_slice , :] is filled into the corresponding location of the (q-b-1)-dimensional tensor +to form the output tensor (Examples 2, 3, 4 and 5 below)

  6. +
+

This operator is the inverse of ScatterND.

+

Example 1

+
+

batch_dims = 0

+

data = [[0,1],[2,3]] # data_shape = [2, 2]

+

indices = [[0,0],[1,1]] # indices_shape = [2, 2]

+

output = [0,3] # output_shape = [2]

+
+

Example 2

+
+

batch_dims = 0

+

data = [[0,1],[2,3]] # data_shape = [2, 2]

+

indices = [[1],[0]] # indices_shape = [2, 1]

+

output = [[2,3],[0,1]] # output_shape = [2, 2]

+
+

Example 3

+
+

batch_dims = 0

+

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

+

indices = [[0,1],[1,0]] # indices_shape = [2, 2]

+

output = [[2,3],[4,5]] # output_shape = [2, 2]

+
+

Example 4

+
+

batch_dims = 0

+

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

+

indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]

+

output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2]

+
+

Example 5

+
+

batch_dims = 1

+

data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]

+

indices = [[1],[0]] # indices_shape = [2, 1]

+

output = [[2,3],[4,5]] # output_shape = [2, 2]

+
+

Attributes

+
    +
  • batch_dims: The number of batch dimensions. The gather of indexing starts from dimension of data[batch_dims:] Default value is +name: "batch_dims" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxGather_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGather_1(*args, **kwargs)#
+

Version

+

Onnx name: Gather

+

This version of the operator has been available since +version 1.

+

Summary

+

Given data tensor of rank r >= 1, and indices tensor of rank q, gather +entries of the axis dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates +them in an output tensor of rank q + (r - 1). +Example 1:

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+indices = [
+    [0, 1],
+    [1, 2],
+]
+output = [
+    [
+        [1.0, 1.2],
+        [2.3, 3.4],
+    ],
+    [
+        [2.3, 3.4],
+        [4.5, 5.7],
+    ],
+]
+
+
+

Example 2:

+
data = [
+    [1.0, 1.2, 1.9],
+    [2.3, 3.4, 3.9],
+    [4.5, 5.7, 5.9],
+]
+indices = [
+    [0, 2],
+]
+axis = 1,
+output = [
+    [[1.0, 1.9]],
+    [[2.3, 3.9]],
+    [[4.5, 5.9]],
+]
+
+
+

Attributes

+
    +
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of any rank q. All index values are expected to be within bounds. It is an error if any of the index values are out of bounds.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank q + (r - 1).

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxGather_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGather_11(*args, **kwargs)#
+

Version

+

Onnx name: Gather

+

This version of the operator has been available since +version 11.

+

Summary

+

Given data tensor of rank r >= 1, and indices tensor of rank q, gather +entries of the axis dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates +them in an output tensor of rank q + (r - 1).

+

axis = 0 :

+

Let +k = indices[i_{0}, …, i_{q-1}] +Then +output[i_{0}, …, i_{q-1}, j_{0}, …, j_{r-2}] = input[k , j_{0}, …, j_{r-2}]

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+indices = [
+    [0, 1],
+    [1, 2],
+]
+output = [
+    [
+        [1.0, 1.2],
+        [2.3, 3.4],
+    ],
+    [
+        [2.3, 3.4],
+        [4.5, 5.7],
+    ],
+]
+
+
+

axis = 1 :

+

Let +k = indices[i_{0}, …, i_{q-1}] +Then +output[j_{0}, i_{0}, …, i_{q-1}, j_{1}, …, j_{r-2}] = input[j_{0}, k, j_{1}, …, j_{r-2}]

+
data = [
+    [1.0, 1.2, 1.9],
+    [2.3, 3.4, 3.9],
+    [4.5, 5.7, 5.9],
+]
+indices = [
+    [0, 2],
+]
+axis = 1,
+output = [
+    [[1.0, 1.9]],
+    [[2.3, 3.9]],
+    [[4.5, 5.9]],
+]
+
+
+

Attributes

+
    +
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of any rank q. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank q + (r - 1).

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxGather_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGather_13(*args, **kwargs)#
+

Version

+

Onnx name: Gather

+

This version of the operator has been available since +version 13.

+

Summary

+

Given data tensor of rank r >= 1, and indices tensor of rank q, gather +entries of the axis dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates +them in an output tensor of rank q + (r - 1).

+

If axis = 0, let k = indices[i_{0}, …, i_{q-1}] +then output[i_{0}, …, i_{q-1}, j_{0}, …, j_{r-2}] = input[k , j_{0}, …, j_{r-2}]:

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+indices = [
+    [0, 1],
+    [1, 2],
+]
+output = [
+    [
+        [1.0, 1.2],
+        [2.3, 3.4],
+    ],
+    [
+        [2.3, 3.4],
+        [4.5, 5.7],
+    ],
+]
+
+
+

If axis = 1, let k = indices[i_{0}, …, i_{q-1}] +then output[j_{0}, i_{0}, …, i_{q-1}, j_{1}, …, j_{r-2}] = input[j_{0}, k, j_{1}, …, j_{r-2}]:

+
data = [
+    [1.0, 1.2, 1.9],
+    [2.3, 3.4, 3.9],
+    [4.5, 5.7, 5.9],
+]
+indices = [
+    [0, 2],
+]
+axis = 1,
+output = [
+        [[1.0, 1.9]],
+        [[2.3, 3.9]],
+        [[4.5, 5.9]],
+]
+
+
+

Attributes

+
    +
  • axis: Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of any rank q. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank q + (r - 1).

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxGemm#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGemm(*args, **kwargs)#
+

Version

+

Onnx name: Gemm

+

This version of the operator has been available since +version 13.

+

Summary

+

General Matrix multiplication: +https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3

+
    +
  • A’ = transpose(A) if transA else A

  • +
  • B’ = transpose(B) if transB else B

  • +
+

Compute Y = alpha * A’ * B’ + beta * C, where input tensor A has shape (M, K) or (K, M), +input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), +and output tensor Y has shape (M, N). A will be transposed before doing the +computation if attribute transA is non-zero, same for B and transB. +This operator supports unidirectional broadcasting (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check Broadcasting in ONNX. +This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+
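In NumPy terms the computation described above is simply the following (a sketch of the formula, not the runtime implementation):

import numpy as np

def gemm(A, B, C=0.0, alpha=1.0, beta=1.0, transA=0, transB=0):
    # Y = alpha * A' * B' + beta * C, with C broadcast to (M, N)
    Ap = A.T if transA else A
    Bp = B.T if transB else B
    return alpha * (Ap @ Bp) + beta * C

A = np.random.randn(4, 3).astype(np.float32)   # (M, K)
B = np.random.randn(3, 2).astype(np.float32)   # (K, N)
C = np.random.randn(2).astype(np.float32)      # broadcast over rows to (M, N)
Y = gemm(A, B, C)                              # shape (4, 2)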

Attributes

+
    +
  • alpha: Scalar multiplier for the product of input tensors A * B. Default value is +name: "alpha" f: 1.0 type: FLOAT

  • +
  • beta: Scalar multiplier for input tensor C. Default value is +name: "beta" f: 1.0 type: FLOAT

  • +
  • transA: Whether A should be transposed Default value is +name: "transA" i: 0 type: INT

  • +
  • transB: Whether B should be transposed Default value is +name: "transB" i: 0 type: INT

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • A (heterogeneous)T: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.

  • +
  • B (heterogeneous)T: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.

  • +
  • C (optional, heterogeneous)T: Optional input tensor C. If not specified, the computation is done as if C is a scalar 0. The shape of C should be unidirectional broadcastable to (M, N).

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor of shape (M, N).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(bfloat16): Constrain input and output types to float/int tensors.

  • +
+
+ +
+
+
+
+

OnnxGemm_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxGemm_1(*args, **kwargs)#
+

Version

+

Onnx name: Gemm

+

This version of the operator has been available since +version 1.

+

Summary

+

General Matrix multiplication: +https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 +Compute Y = alpha * A * B + beta * C, where input tensor A has +dimension (M X K), input tensor B has dimension (K X N), input tensor C and +output tensor Y have dimension (M X N). +If attribute broadcast is non-zero, input tensor C will be broadcasted to match +the dimension requirement. A will be transposed before doing the computation +if attribute transA is non-zero, same for B and transB.

+

Attributes

+
    +
  • alpha: Scalar multiplier for the product of input tensors A * B, the default value is 1.0. Default value is +name: "alpha" f: 1.0 type: FLOAT

  • +
  • beta: Scalar multiplier for input tensor C, the default value is 1.0. Default value is +name: "beta" f: 1.0 type: FLOAT

  • +
  • broadcast: Whether C should be broadcasted Default value is +name: "broadcast" i: 0 type: INT

  • +
  • transA: Whether A should be transposed Default value is +name: "transA" i: 0 type: INT

  • +
  • transB: Whether B should be transposed Default value is +name: "transB" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: Input tensor A

  • +
  • B (heterogeneous)T: Input tensor B

  • +
  • C (heterogeneous)T: Input tensor C, can be inplace.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxGemm_11#

class skl2onnx.algebra.onnx_ops.OnnxGemm_11(*args, **kwargs)#

Version

Onnx name: Gemm

This version of the operator has been available since version 11.

+

Summary

+

General Matrix multiplication: +https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3

+

A’ = transpose(A) if transA else A

+

B’ = transpose(B) if transB else B

+

Compute Y = alpha * A’ * B’ + beta * C, where input tensor A has shape (M, K) or (K, M), +input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), +and output tensor Y has shape (M, N). A will be transposed before doing the +computation if attribute transA is non-zero, same for B and transB. +This operator supports unidirectional broadcasting (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check Broadcasting in ONNX. +This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

+
    +
  • alpha: Scalar multiplier for the product of input tensors A * B. Default value is +name: "alpha" f: 1.0 type: FLOAT

  • +
  • beta: Scalar multiplier for input tensor C. Default value is +name: "beta" f: 1.0 type: FLOAT

  • +
  • transA: Whether A should be transposed Default value is +name: "transA" i: 0 type: INT

  • +
  • transB: Whether B should be transposed Default value is +name: "transB" i: 0 type: INT

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • A (heterogeneous)T: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.

  • +
  • B (heterogeneous)T: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.

  • +
  • C (optional, heterogeneous)T: Optional input tensor C. If not specified, the computation is done as if C is a scalar 0. The shape of C should be unidirectional broadcastable to (M, N).

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor of shape (M, N).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64): Constrain input and output types to float/int tensors.

  • +
+
+ +
+
+
+
+

OnnxGemm_13#

class skl2onnx.algebra.onnx_ops.OnnxGemm_13(*args, **kwargs)#

Version

Onnx name: Gemm

This version of the operator has been available since version 13.

+

Summary

+

General Matrix multiplication: +https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3

+
    +
  • A’ = transpose(A) if transA else A

  • +
  • B’ = transpose(B) if transB else B

  • +
+

Compute Y = alpha * A’ * B’ + beta * C, where input tensor A has shape (M, K) or (K, M), +input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), +and output tensor Y has shape (M, N). A will be transposed before doing the +computation if attribute transA is non-zero, same for B and transB. +This operator supports unidirectional broadcasting (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check Broadcasting in ONNX. +This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

+
    +
  • alpha: Scalar multiplier for the product of input tensors A * B. Default value is +name: "alpha" f: 1.0 type: FLOAT

  • +
  • beta: Scalar multiplier for input tensor C. Default value is +name: "beta" f: 1.0 type: FLOAT

  • +
  • transA: Whether A should be transposed Default value is +name: "transA" i: 0 type: INT

  • +
  • transB: Whether B should be transposed Default value is +name: "transB" i: 0 type: INT

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • A (heterogeneous)T: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.

  • +
  • B (heterogeneous)T: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.

  • +
  • C (optional, heterogeneous)T: Optional input tensor C. If not specified, the computation is done as if C is a scalar 0. The shape of C should be unidirectional broadcastable to (M, N).

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor of shape (M, N).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(bfloat16): Constrain input and output types to float/int tensors.

  • +
+
+ +
+
+
+
+

OnnxGemm_6#

class skl2onnx.algebra.onnx_ops.OnnxGemm_6(*args, **kwargs)#

Version

Onnx name: Gemm

This version of the operator has been available since version 6.

+

Summary

+

General Matrix multiplication: +https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 +Compute Y = alpha * A * B + beta * C, where input tensor A has +dimension (M X K), input tensor B has dimension (K X N), input tensor C and +output tensor Y have dimension (M X N). +If attribute broadcast is non-zero, input tensor C will be broadcasted to match +the dimension requirement. A will be transposed before doing the computation +if attribute transA is non-zero, same for B and transB.

+

Attributes

+
    +
  • alpha: Scalar multiplier for the product of input tensors A * B, the default value is 1.0. Default value is +name: "alpha" f: 1.0 type: FLOAT

  • +
  • beta: Scalar multiplier for input tensor C, the default value is 1.0. Default value is +name: "beta" f: 1.0 type: FLOAT

  • +
  • broadcast: Whether C should be broadcasted Default value is +name: "broadcast" i: 0 type: INT

  • +
  • transA: Whether A should be transposed Default value is +name: "transA" i: 0 type: INT

  • +
  • transB: Whether B should be transposed Default value is +name: "transB" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: Input tensor A

  • +
  • B (heterogeneous)T: Input tensor B

  • +
  • C (heterogeneous)T: Input tensor C

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxGemm_7#

class skl2onnx.algebra.onnx_ops.OnnxGemm_7(*args, **kwargs)#

Version

Onnx name: Gemm

This version of the operator has been available since version 7.

+

Summary

+

General Matrix multiplication: +https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3

+

A’ = transpose(A) if transA else A

+

B’ = transpose(B) if transB else B

+

Compute Y = alpha * A’ * B’ + beta * C, where input tensor A has shape (M, K) or (K, M), +input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), +and output tensor Y has shape (M, N). A will be transposed before doing the +computation if attribute transA is non-zero, same for B and transB. +This operator supports unidirectional broadcasting (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check Broadcasting in ONNX.

+

Attributes

+
    +
  • alpha: Scalar multiplier for the product of input tensors A * B. Default value is +name: "alpha" f: 1.0 type: FLOAT

  • +
  • beta: Scalar multiplier for input tensor C. Default value is +name: "beta" f: 1.0 type: FLOAT

  • +
  • transA: Whether A should be transposed Default value is +name: "transA" i: 0 type: INT

  • +
  • transB: Whether B should be transposed Default value is +name: "transB" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.

  • +
  • B (heterogeneous)T: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.

  • +
  • C (heterogeneous)T: Input tensor C. The shape of C should be unidirectional broadcastable to (M, N).

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor of shape (M, N).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxGemm_9#

class skl2onnx.algebra.onnx_ops.OnnxGemm_9(*args, **kwargs)#

Version

Onnx name: Gemm

This version of the operator has been available since version 9.

+

Summary

+

General Matrix multiplication: +https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3

+

A’ = transpose(A) if transA else A

+

B’ = transpose(B) if transB else B

+

Compute Y = alpha * A’ * B’ + beta * C, where input tensor A has shape (M, K) or (K, M), +input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), +and output tensor Y has shape (M, N). A will be transposed before doing the +computation if attribute transA is non-zero, same for B and transB. +This operator supports unidirectional broadcasting (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check Broadcasting in ONNX.

+

Attributes

+
    +
  • alpha: Scalar multiplier for the product of input tensors A * B. Default value is +name: "alpha" f: 1.0 type: FLOAT

  • +
  • beta: Scalar multiplier for input tensor C. Default value is +name: "beta" f: 1.0 type: FLOAT

  • +
  • transA: Whether A should be transposed Default value is +name: "transA" i: 0 type: INT

  • +
  • transB: Whether B should be transposed Default value is +name: "transB" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.

  • +
  • B (heterogeneous)T: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.

  • +
  • C (heterogeneous)T: Input tensor C. The shape of C should be unidirectional broadcastable to (M, N).

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor of shape (M, N).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64): Constrain input and output types to float/int tensors.

  • +
+
+ +
+
+
+
+

OnnxGlobalAveragePool#

class skl2onnx.algebra.onnx_ops.OnnxGlobalAveragePool(*args, **kwargs)#

Version

Onnx name: GlobalAveragePool

This version of the operator has been available since version 1.

Summary

GlobalAveragePool consumes an input tensor X and applies average pooling across the values in the same channel. This is equivalent to AveragePool with kernel size equal to the spatial dimension of input tensor.

+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
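
As a reference for the semantics (not the converter API), the pooled output can be reproduced in NumPy by averaging over all spatial axes while keeping the (N, C) leading dimensions; GlobalMaxPool and GlobalLpPool below follow the same reduction pattern with a maximum and an Lp norm respectively.

import numpy as np

def global_average_pool(X):
    # Average over every spatial axis, keeping N and C; output dims are (N, C, 1, ..., 1).
    spatial_axes = tuple(range(2, X.ndim))
    return X.mean(axis=spatial_axes, keepdims=True)

X = np.random.randn(2, 3, 5, 7).astype(np.float32)
Y = global_average_pool(X)
print(Y.shape)  # (2, 3, 1, 1)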


OnnxGlobalAveragePool_1#

class skl2onnx.algebra.onnx_ops.OnnxGlobalAveragePool_1(*args, **kwargs)#

Version

Onnx name: GlobalAveragePool

This version of the operator has been available since version 1.

+

Summary

+

GlobalAveragePool consumes an input tensor X and applies average pooling across +the values in the same channel. This is equivalent to AveragePool with kernel size +equal to the spatial dimension of input tensor.

+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxGlobalLpPool#

class skl2onnx.algebra.onnx_ops.OnnxGlobalLpPool(*args, **kwargs)#

Version

Onnx name: GlobalLpPool

This version of the operator has been available since version 2.

Summary

GlobalLpPool consumes an input tensor X and applies lp pool pooling across the values in the same channel. This is equivalent to LpPool with kernel size equal to the spatial dimension of input tensor.

+

Attributes

+
    +
  • p: p value of the Lp norm used to pool over the input data. Default value is name: "p" i: 2 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
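
A NumPy sketch of the reduction, under the usual reading that the Lp "pooling" over the whole spatial extent is the Lp norm of each channel:

import numpy as np

def global_lp_pool(X, p=2):
    # Lp norm over all spatial axes for each (n, c) slice; output is (N, C, 1, ..., 1).
    spatial_axes = tuple(range(2, X.ndim))
    return (np.abs(X) ** p).sum(axis=spatial_axes, keepdims=True) ** (1.0 / p)

X = np.random.randn(2, 3, 5, 7).astype(np.float32)
print(global_lp_pool(X, p=2).shape)  # (2, 3, 1, 1)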


OnnxGlobalLpPool_1#

class skl2onnx.algebra.onnx_ops.OnnxGlobalLpPool_1(*args, **kwargs)#

Version

Onnx name: GlobalLpPool

This version of the operator has been available since version 1.

+

Summary

+

GlobalLpPool consumes an input tensor X and applies lp pool pooling across the values in the same channel. This is equivalent to LpPool with kernel size equal to the spatial dimension of input tensor.

+

Attributes

+
    +
  • p: p value of the Lp norm used to pool over the input data, default is 2.0. Default value is +name: "p" f: 2.0 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimension are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. Dimensions will be N x C x 1 x 1

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxGlobalLpPool_2#

class skl2onnx.algebra.onnx_ops.OnnxGlobalLpPool_2(*args, **kwargs)#

Version

Onnx name: GlobalLpPool

This version of the operator has been available since version 2.

+

Summary

+

GlobalLpPool consumes an input tensor X and applies lp pool pooling across +the values in the same channel. This is equivalent to LpPool with kernel size +equal to the spatial dimension of input tensor.

+

Attributes

+
    +
  • p: p value of the Lp norm used to pool over the input data. Default value is +name: "p" i: 2 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxGlobalMaxPool#

class skl2onnx.algebra.onnx_ops.OnnxGlobalMaxPool(*args, **kwargs)#

Version

Onnx name: GlobalMaxPool

This version of the operator has been available since version 1.

Summary

GlobalMaxPool consumes an input tensor X and applies max pooling across the values in the same channel. This is equivalent to MaxPool with kernel size equal to the spatial dimension of input tensor.

+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
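
The corresponding NumPy reduction, for reference:

import numpy as np

def global_max_pool(X):
    # Maximum over all spatial axes; output keeps (N, C) and collapses the rest to 1.
    return X.max(axis=tuple(range(2, X.ndim)), keepdims=True)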


OnnxGlobalMaxPool_1#

class skl2onnx.algebra.onnx_ops.OnnxGlobalMaxPool_1(*args, **kwargs)#

Version

Onnx name: GlobalMaxPool

This version of the operator has been available since version 1.

+

Summary

+

GlobalMaxPool consumes an input tensor X and applies max pooling across +the values in the same channel. This is equivalent to MaxPool with kernel size +equal to the spatial dimension of input tensor.

+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxGradient#

class skl2onnx.algebra.onnx_ops.OnnxGradient(*args, **kwargs)#

Version

Onnx name: Gradient

This version of the operator has been available since version 1 of domain ai.onnx.preview.training.

Summary

Gradient operator computes the partial derivatives of a specific tensor w.r.t. some other tensors. This operator is widely used in gradient-based training algorithms. To illustrate its use, let’s consider a computation graph,

X -----.
       |
       v
W --> Conv --> H --> Gemm --> Y
                      ^
                      |
                      Z

, where W and Z are trainable tensors. Note that operators’ attributes are omitted for the sake of simplicity. Let dY/dW (dY/dZ) be the gradient of Y with respect to W (Z). The user can compute gradient by inserting Gradient operator to form another graph shown below.

+
W --> Conv --> H --> Gemm --> Y
+|      ^              ^
+|      |              |
+|      X              Z
+|      |              |
+|      |   .----------'
+|      |   |  (W/Z/X is the 1st/2nd/3rd input of Gradient as shown in
+|      |   |   "xs" followed by "zs")
+|      v   v
+'---> Gradient(xs=["W", "Z"], zs=["X"], y="Y")
+       |   |
+       |   '-----------------------------------> dY/dW (1st output of Gradient)
+       |
+       '---------------------------------------> dY/dZ (2nd output of Gradient)
+
+
+

By definition, the tensor “y” is a function of independent variables in “xs” +and “zs”. Since we only compute the gradient of “y” w.r.t. the differentiable +variables in “xs”, this Gradient only outputs dY/dW and dY/dZ. Note that “H” +cannot appear in “xs” and “zs”. The reason is that “H” can be determined by +tensors “W” and “X” and therefore “H” is not an independent variable.

+

All outputs are optional. If needed, for example, user can assign an empty +string to the 1st output name of that Gradient to skip the generation of dY/dW. +Note that the concept of optional outputs can also be found in ONNX’s RNN, GRU, +and LSTM.

+

Gradient operator can compute derivative against intermediate tensors. For +example, the gradient of Y with respect to H can be done via

+
W --> Conv --> H --> Gemm --> Y
+       ^       |      ^
+       |       |      |
+       X       |      Z
+       .-------'      |
+       |   .----------'
+       |   | (H/Z is the 1st/2nd input of Gradient as shown in "xs")
+       v   v
+      Gradient(xs=["H", "Z"], y="Y")
+       |   |
+       |   '-----------------------------------> dY/dH (1st output of Gradient)
+       |
+       '---------------------------------------> dY/dZ (2nd output of Gradient)
+
+
+

It is possible to represent high-order differentiation using Gradient operators. +For example, given the following linear model:

+
W --> Gemm --> Y --> Loss --> O
+       ^              ^
+       |              |
+       X              L
+
+
+

To compute the 2nd order derivative of O with respect to W (denoted by +d^2O/dW^2), one can do

+
W --> Gemm --> Y --> Loss --> O
+|      ^              ^
+|      |              |
+|      X .------------L
+|      | |            |
+|      | |            v
++------+-+> Gradient(xs=["X", "W"], zs=["L"], y="O") ---> dO/dX (1st output of Gradient)
+|      | |    |
+|      | |    '---> dO/dW (2nd output of Gradient)
+|      v v
+'---> Gradient(xs=["X", "W"], zs=["L"], y="dO/dW") ---> d(dO/dW)dX (1st output of
+       |                                                  Gradient)
+       |
+       |
+       '---> d^2O/dW^2 (2nd output of Gradient)
+
+
+

The tensors named in attributes “xs”, “zs”, and “y” define the differentiated +computation graph, and the inputs to Gradient node define the values at +which the gradient is computed. We can feed different tensors to the identified +graph. For example, one can compute the gradient of Y with respect to H at +a specific value of H, H_1, by providing that value as an input to the Gradient +node.

+
W --> Conv --> H --> Gemm --> Y
+       ^              ^
+       |              |
+       X              Z
+
+          Z_1 (2nd input of Gradient)
+           |
+           v
+H_1 --> Gradient(xs=["H", "Z"], y="Y") ---> dY/dH when H = H_1 and Y = Y_1.
+           |
+           '------------------------------> dY/dZ (2nd output of Gradient)
+
+
+

When the inputs of Gradient are the tensors named in “xs” and “zs”, the +computation can be optimized. More specifically, intermediate variables in +forward pass can be reused if the gradient is computed via reverse-mode +auto-differentiation.

+

Attributes

+
    +
  • +
  • +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • Inputs (variadic)T1: The values fed into graph identified by the attributes. The i-th input is the value of the i-th tensor specified in the concatenated list of the attribute “xs” and the attribute “zs”. For example, if xs=[“A”, “B”] and zs=[“C”], the first input is used as the value of symbol “A” and the 3rd input is substituted for all the occurrences of “C”.

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • Outputs (variadic)T2: The gradient of the tensor specified by the attribute “y” with respect to each of tensors specified in the attribute “xs”. The i-th output is the gradient of “y” with respect to the i-th tensor specified in the attribute “xs”.

  • +
+

Type Constraints

+
    +
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Allow outputs to be any kind of tensor.

  • +
  • T2 tensor(float16), tensor(float), tensor(double): Allow inputs to be any kind of floating-point tensor.
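
Since this operator lives in the ai.onnx.preview.training domain and is rarely emitted by the scikit-learn converters, here is only a hedged sketch of how the first graph above could request dY/dW and dY/dZ with the plain onnx helper API; runtime support for this domain is limited and the node is shown for illustration only.

from onnx import helper

# Gradient of "Y" w.r.t. "W" and "Z", evaluated at the values fed in for W, Z and X.
grad_node = helper.make_node(
    "Gradient",
    inputs=["W", "Z", "X"],       # values for the tensors listed in xs, then zs
    outputs=["dY_dW", "dY_dZ"],   # one gradient per entry of xs
    xs=["W", "Z"],
    zs=["X"],
    y="Y",
    domain="ai.onnx.preview.training",
)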


OnnxGradient_1#

class skl2onnx.algebra.onnx_ops.OnnxGradient_1(*args, **kwargs)#

Version

Onnx name: Gradient

This version of the operator has been available since version 1 of domain ai.onnx.preview.training.

+

Summary

+

Gradient operator computes the partial derivatives of a specific tensor w.r.t. +some other tensors. This operator is widely used in gradient-based training +algorithms. To illustrate its use, let’s consider a computation graph,

+
X -----.
+       |
+       v
+W --> Conv --> H --> Gemm --> Y
+                      ^
+                      |
+                      Z
+
+
+

, where W and Z are trainable tensors. Note that operators’ attributes are +omitted for the sake of simplicity. Let dY/dW (dY/dZ) be the gradient of +Y with respect to W (Z). The user can compute gradient by inserting Gradient +operator to form another graph shown below.

+
W --> Conv --> H --> Gemm --> Y
+|      ^              ^
+|      |              |
+|      X              Z
+|      |              |
+|      |   .----------'
+|      |   |  (W/Z/X is the 1st/2nd/3rd input of Gradient as shown in
+|      |   |   "xs" followed by "zs")
+|      v   v
+'---> Gradient(xs=["W", "Z"], zs=["X"], y="Y")
+       |   |
+       |   '-----------------------------------> dY/dW (1st output of Gradient)
+       |
+       '---------------------------------------> dY/dZ (2nd output of Gradient)
+
+
+

By definition, the tensor “y” is a function of independent variables in “xs” +and “zs”. Since we only compute the gradient of “y” w.r.t. the differentiable +variables in “xs”, this Gradient only outputs dY/dW and dY/dZ. Note that “H” +cannot appear in “xs” and “zs”. The reason is that “H” can be determined by +tensors “W” and “X” and therefore “H” is not an independent variable.

+

All outputs are optional. If needed, for example, user can assign an empty +string to the 1st output name of that Gradient to skip the generation of dY/dW. +Note that the concept of optional outputs can also be found in ONNX’s RNN, GRU, +and LSTM.

+

Gradient operator can compute derivative against intermediate tensors. For +example, the gradient of Y with respect to H can be done via

+
W --> Conv --> H --> Gemm --> Y
+       ^       |      ^
+       |       |      |
+       X       |      Z
+       .-------'      |
+       |   .----------'
+       |   | (H/Z is the 1st/2nd input of Gradient as shown in "xs")
+       v   v
+      Gradient(xs=["H", "Z"], y="Y")
+       |   |
+       |   '-----------------------------------> dY/dH (1st output of Gradient)
+       |
+       '---------------------------------------> dY/dZ (2nd output of Gradient)
+
+
+

It is possible to represent high-order differentiation using Gradient operators. +For example, given the following linear model:

+
W --> Gemm --> Y --> Loss --> O
+       ^              ^
+       |              |
+       X              L
+
+
+

To compute the 2nd order derivative of O with respect to W (denoted by +d^2O/dW^2), one can do

+
W --> Gemm --> Y --> Loss --> O
+|      ^              ^
+|      |              |
+|      X .------------L
+|      | |            |
+|      | |            v
++------+-+> Gradient(xs=["X", "W"], zs=["L"], y="O") ---> dO/dX (1st output of Gradient)
+|      | |    |
+|      | |    '---> dO/dW (2nd output of Gradient)
+|      v v
+'---> Gradient(xs=["X", "W"], zs=["L"], y="dO/dW") ---> d(dO/dW)dX (1st output of
+       |                                                  Gradient)
+       |
+       |
+       '---> d^2O/dW^2 (2nd output of Gradient)
+
+
+

The tensors named in attributes “xs”, “zs”, and “y” define the differentiated +computation graph, and the inputs to Gradient node define the values at +which the gradient is computed. We can feed different tensors to the identified +graph. For example, one can compute the gradient of Y with respect to H at +a specific value of H, H_1, by providing that value as an input to the Gradient +node.

+
W --> Conv --> H --> Gemm --> Y
+       ^              ^
+       |              |
+       X              Z
+
+          Z_1 (2nd input of Gradient)
+           |
+           v
+H_1 --> Gradient(xs=["H", "Z"], y="Y") ---> dY/dH when H = H_1 and Y = Y_1.
+           |
+           '------------------------------> dY/dZ (2nd output of Gradient)
+
+
+

When the inputs of Gradient are the tensors named in “xs” and “zs”, the +computation can be optimized. More specifically, intermediate variables in +forward pass can be reused if the gradient is computed via reverse-mode +auto-differentiation.

+

Attributes

+
    +
  • +
  • +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • Inputs (variadic)T1: The values fed into graph identified by the attributes. The i-th input is the value of the i-th tensor specified in the concatenated list of the attribute “xs” and the attribute “zs”. For example, if xs=[“A”, “B”] and zs=[“C”], the first input is used as the value of symbol “A” and the 3rd input is substituted for all the occurrences of “C”.

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • Outputs (variadic)T2: The gradient of the tensor specified by the attribute “y” with respect to each of tensors specified in the attribute “xs”. The i-th output is the gradient of “y” with respect to the i-th tensor specified in the attribute “xs”.

  • +
+

Type Constraints

+
    +
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Allow outputs to be any kind of tensor.

  • +
  • T2 tensor(float16), tensor(float), tensor(double): Allow inputs to be any kind of floating-point tensor.

  • +
+
+ +
+
+
+
+

OnnxGreater#

class skl2onnx.algebra.onnx_ops.OnnxGreater(*args, **kwargs)#

Version

Onnx name: Greater

This version of the operator has been available since version 13.

Summary

Returns the tensor resulting from performing the greater logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to all numeric tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.
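
For reference, the multidirectional broadcasting described above matches NumPy's rules, so the operator's result can be checked against a plain comparison:

import numpy as np

# Shapes (3, 1) and (1, 4) broadcast to (3, 4), exactly as NumPy does.
A = np.arange(3, dtype=np.float32).reshape(3, 1)
B = np.arange(4, dtype=np.float32).reshape(1, 4)
C = np.greater(A, B)     # boolean tensor of shape (3, 4)
print(C.shape, C.dtype)  # (3, 4) bool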


OnnxGreaterOrEqual#

class skl2onnx.algebra.onnx_ops.OnnxGreaterOrEqual(*args, **kwargs)#

Version

Onnx name: GreaterOrEqual

This version of the operator has been available since version 16.

+

Summary

+

Returns the tensor resulted from performing the greater_equal logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to all numeric tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxGreaterOrEqual_12#

class skl2onnx.algebra.onnx_ops.OnnxGreaterOrEqual_12(*args, **kwargs)#

Version

Onnx name: GreaterOrEqual

This version of the operator has been available since version 12.

+

Summary

+

Returns the tensor resulted from performing the greater_equal logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input types to all numeric tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxGreaterOrEqual_16#

class skl2onnx.algebra.onnx_ops.OnnxGreaterOrEqual_16(*args, **kwargs)#

Version

Onnx name: GreaterOrEqual

This version of the operator has been available since version 16.

+

Summary

+

Returns the tensor resulted from performing the greater_equal logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to all numeric tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxGreater_1#

class skl2onnx.algebra.onnx_ops.OnnxGreater_1(*args, **kwargs)#

Version

Onnx name: Greater

This version of the operator has been available since version 1.

+

Summary

+

Returns the tensor resulted from performing the greater logical operation +elementwise on the input tensors A and B.

+

If broadcasting is enabled, the right-hand-side argument will be broadcasted +to match the shape of left-hand-side argument. See the doc of Add for a +detailed description of the broadcasting rules.

+

Attributes

+
    +
  • +
  • broadcast: Enable broadcasting Default value is +name: "broadcast" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: Left input tensor for the logical operator.

  • +
  • B (heterogeneous)T: Right input tensor for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input to float tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxGreater_13#

class skl2onnx.algebra.onnx_ops.OnnxGreater_13(*args, **kwargs)#

Version

Onnx name: Greater

This version of the operator has been available since version 13.

+

Summary

+

Returns the tensor resulted from performing the greater logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to all numeric tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxGreater_7#

class skl2onnx.algebra.onnx_ops.OnnxGreater_7(*args, **kwargs)#

Version

Onnx name: Greater

This version of the operator has been available since version 7.

+

Summary

+

Returns the tensor resulted from performing the greater logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input to float tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxGreater_9#

class skl2onnx.algebra.onnx_ops.OnnxGreater_9(*args, **kwargs)#

Version

Onnx name: Greater

This version of the operator has been available since version 9.

+

Summary

+

Returns the tensor resulted from performing the greater logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input types to all numeric tensors.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxGridSample#

class skl2onnx.algebra.onnx_ops.OnnxGridSample(*args, **kwargs)#

Version

Onnx name: GridSample

This version of the operator has been available since version 16.

Summary

Given an input X and a flow-field grid, computes the output Y using X values and pixel locations from grid. Currently, only spatial (4-D) inputs are supported. For input X with shape (N, C, H, W) and grid with shape (N, H_out, W_out, 2), the output Y will have shape (N, C, H_out, W_out).

The tensor X contains values at centers of square pixels in a H by W 2-dimensional image. The tensor grid describes normalized positions where the output Y is to be computed using a specified interpolation method (the mode) and a padding mode (for grid positions falling outside the 2-dimensional image).

Elements in grid[N, H_out, W_out] are size-2 vectors specifying positions in the 2-dimensional space of X. They are used to interpolate output values of Y[N, C, H_out, W_out].

The GridSample operator is often used in doing grid generator and sampler in the [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025). See also in [torch.nn.functional.grid_sample](https://pytorch.org/docs/master/generated/torch.nn.functional.grid_sample.html#torch-nn-functional-grid-sample).

+

Attributes

+
    +
  • align_corners: If align_corners=1, the extrema (-1 and 1) are considered as referring to the center points of the input’s corner pixels. If align_corners=0, they are instead considered as referring to the corner points of the input’s corner pixels, making the sampling more resolution agnostic. Default value is +name: "align_corners" i: 0 type: INT

  • +
  • mode: Three interpolation modes: bilinear (default), nearest and bicubic. Default value is +name: "mode" s: "bilinear" type: STRING

  • +
  • padding_mode: Support padding modes for outside grid values: zeros`(default), `border, reflection. zeros: use 0 for out-of-bound grid locations, border: use border values for out-of-bound grid locations, reflection: use values at locations reflected by the border for out-of-bound grid locations. If index 0 represents the margin pixel, the reflected value at index -1 will be the same as the value at index 1. For location far away from the border, it will keep being reflected until becoming in bound. If pixel location x = -3.5 reflects by border -1 and becomes x’ = 1.5, then reflects by border 1 and becomes x’’ = 0.5. Default value is +name: "padding_mode" s: "zeros" type: STRING

  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: 4-D tensor of shape (N, C, H, W), where N is the batch size, C is the numbers of channels, H and W are the height and width of the input data.

  • +
  • grid (heterogeneous)T2: Input offset, 4-D tensor of shape (N, H_out, W_out, 2), where H_out and W_out are the height and width of grid and output, Grid specifies the sampling pixel locations normalized by the input spatial dimensions. Therefore, it should have most values in the range of [-1, 1]. If grid has values outside the range of [-1, 1], the corresponding outputs will be handled as defined by padding_mode.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T1: 4-D tensor of shape (N, C, H_out, W_out) of sampled values. For integer input types, intermediate values are computed as floating point and cast to integer at the end.

  • +
+

Type Constraints

+
    +
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input X and output Y types to all tensor types.

  • +
  • T2 tensor(float16), tensor(float), tensor(double): Constrain grid types to float tensors.
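
A small NumPy sketch of how a sampling grid is usually laid out: the last dimension holds (x, y) coordinates normalized to [-1, 1], with x running along the width axis and y along the height axis (the convention used by torch.nn.functional.grid_sample). With align_corners=1, the grid below requests the original pixel centers, so sampling with it approximately reproduces the input; the helper name identity_grid is illustrative only.

import numpy as np

def identity_grid(n, h_out, w_out):
    # Normalized coordinates in [-1, 1]; grid[..., 0] is x (width), grid[..., 1] is y (height).
    ys = np.linspace(-1.0, 1.0, h_out, dtype=np.float32)
    xs = np.linspace(-1.0, 1.0, w_out, dtype=np.float32)
    gy, gx = np.meshgrid(ys, xs, indexing="ij")   # both (h_out, w_out)
    grid = np.stack([gx, gy], axis=-1)            # (h_out, w_out, 2)
    return np.broadcast_to(grid, (n, h_out, w_out, 2)).copy()

grid = identity_grid(1, 4, 6)
print(grid.shape)  # (1, 4, 6, 2)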


OnnxGridSample_16#

class skl2onnx.algebra.onnx_ops.OnnxGridSample_16(*args, **kwargs)#

Version

Onnx name: GridSample

This version of the operator has been available since version 16.

+

Summary

+

Given an input X and a flow-field grid, computes the output Y using X values and pixel locations from grid. +Currently, only spatial (4-D) inputs are supported. For input X with shape (N, C, H, W) and grid with shape (N, H_out, W_out, 2), +the output Y will have shape (N, C, H_out, W_out).

+

The tensor X contains values at centers of square pixels in a H by W 2-dimensional image. +The tensor grid describes normalized positions where the output Y is to be computed +using a specified interpolation method (the mode) and a padding mode (for grid positions falling outside the 2-dimensional image).

+

Elements in grid[N, H_out, W_out] are size-2 vectors specifying positions in the 2-dimensional space of X. +They are used to interpolate output values of Y[N, C, H_out, W_out].

+

The GridSample operator is often used in doing grid generator and sampler in the [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025). +See also in [torch.nn.functional.grid_sample](https://pytorch.org/docs/master/generated/torch.nn.functional.grid_sample.html#torch-nn-functional-grid-sample).

+

Attributes

+
    +
  • align_corners: If align_corners=1, the extrema (-1 and 1) are considered as referring to the center points of the input’s corner pixels. If align_corners=0, they are instead considered as referring to the corner points of the input’s corner pixels, making the sampling more resolution agnostic. Default value is +name: "align_corners" i: 0 type: INT

  • +
  • mode: Three interpolation modes: bilinear (default), nearest and bicubic. Default value is +name: "mode" s: "bilinear" type: STRING

  • +
  • padding_mode: Support padding modes for outside grid values: zeros`(default), `border, reflection. zeros: use 0 for out-of-bound grid locations, border: use border values for out-of-bound grid locations, reflection: use values at locations reflected by the border for out-of-bound grid locations. If index 0 represents the margin pixel, the reflected value at index -1 will be the same as the value at index 1. For location far away from the border, it will keep being reflected until becoming in bound. If pixel location x = -3.5 reflects by border -1 and becomes x’ = 1.5, then reflects by border 1 and becomes x’’ = 0.5. Default value is +name: "padding_mode" s: "zeros" type: STRING

  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: 4-D tensor of shape (N, C, H, W), where N is the batch size, C is the numbers of channels, H and W are the height and width of the input data.

  • +
  • grid (heterogeneous)T2: Input offset, 4-D tensor of shape (N, H_out, W_out, 2), where H_out and W_out are the height and width of grid and output, Grid specifies the sampling pixel locations normalized by the input spatial dimensions. Therefore, it should have most values in the range of [-1, 1]. If grid has values outside the range of [-1, 1], the corresponding outputs will be handled as defined by padding_mode.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T1: 4-D tensor of shape (N, C, H_out, W_out) of sampled values. For integer input types, intermediate values are computed as floating point and cast to integer at the end.

  • +
+

Type Constraints

+
    +
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input X and output Y types to all tensor types.

  • +
  • T2 tensor(float16), tensor(float), tensor(double): Constrain grid types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxGroupNormalization#

class skl2onnx.algebra.onnx_ops.OnnxGroupNormalization(*args, **kwargs)#

Version

Onnx name: GroupNormalization

This version of the operator has been available since version 18.

Summary

A GroupNormalization function. Carries out group normalization as described in the paper https://arxiv.org/abs/1803.08494

This operator transforms input according to

y = scale * (x - mean) / sqrt(variance + epsilon) + bias,

where the mean and variance are computed per instance per group of channels, and scale and bias should be specified for each group of channels. The number of channels should be divisible by the number of groups num_groups so that there are an equal number of channels per group.

When the number of groups is the same as the number of channels, this operator is equivalent to InstanceNormalization. When there is only one group, this operator is equivalent to LayerNormalization.

+

Attributes

+
    +
  • epsilon: The epsilon value to use to avoid division by zero. Default value is +name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor. Dimensions for image cases are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width of the data. Statistics are computed for every group of channels over C, H, and W. For non-image cases, the dimensions are in the form of (N x C x D1 x D2 … Dn).

  • +
  • scale (heterogeneous)T: Scale tensor of shape (num_groups).

  • +
  • bias (heterogeneous)T: Bias tensor of shape (num_groups).

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: The output tensor of the same shape as X.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.
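
A NumPy reference of the formula above, following the signature documented here (per-group scale and bias of shape (num_groups,)); this is a sketch of the semantics, not the converter API:

import numpy as np

def group_normalization(X, scale, bias, num_groups, epsilon=1e-5):
    # X has shape (N, C, ...); statistics are computed per instance and per group.
    N = X.shape[0]
    x = X.reshape(N, num_groups, -1)
    mean = x.mean(axis=2, keepdims=True)
    var = x.var(axis=2, keepdims=True)
    x = (x - mean) / np.sqrt(var + epsilon)
    # One scale/bias value per group, broadcast over the channels of that group.
    x = x * scale.reshape(1, num_groups, 1) + bias.reshape(1, num_groups, 1)
    return x.reshape(X.shape)

X = np.random.randn(2, 6, 4, 4).astype(np.float32)
Y = group_normalization(X, np.ones(3, np.float32), np.zeros(3, np.float32), num_groups=3)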


OnnxGroupNormalization_18#

class skl2onnx.algebra.onnx_ops.OnnxGroupNormalization_18(*args, **kwargs)#

Version

Onnx name: GroupNormalization

This version of the operator has been available since version 18.

+

Summary

+

A GroupNormalization function. Carries out group normalization as described in +the paper https://arxiv.org/abs/1803.08494

+

This operator transforms input according to

+
y = scale * (x - mean) / sqrt(variance + epsilon) + bias,
+
+
+

where the mean and variance are computed per instance per group of channels, and +scale and bias should be specified for each group of channels. The number of +groups num_groups should be divisible by the number of channels so that there are +an equal number of channels per group.

+

When the number of groups is the same as the number of channels, this operator is +equivalent to InstanceNormalization. When there is only one group, this operator +is equivalent to LayerNormalization.

+

Attributes

+
    +
  • epsilon: The epsilon value to use to avoid division by zero. Default value is +name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor. Dimensions for image cases are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width of the data. Statistics are computed for every group of channels over C, H, and W. For non-image cases, the dimensions are in the form of (N x C x D1 x D2 … Dn).

  • +
  • scale (heterogeneous)T: Scale tensor of shape (num_groups).

  • +
  • bias (heterogeneous)T: Bias tensor of shape (num_groups).

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: The output tensor of the same shape as X.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxHammingWindow#

class skl2onnx.algebra.onnx_ops.OnnxHammingWindow(*args, **kwargs)#

Version

Onnx name: HammingWindow

This version of the operator has been available since version 17.

Summary

Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106.

+

Attributes

+
    +
  • output_datatype: The data type of the output tensor. Strictly must be one of the values from DataType enum in TensorProto whose values correspond to T2. The default value is 1 = FLOAT. Default value is +name: "output_datatype" i: 1 type: INT

  • +
  • periodic: If 1, returns a window to be used as periodic function. If 0, return a symmetric window. When ‘periodic’ is specified, hann computes a window of length size + 1 and returns the first size points. The default value is 1. Default value is +name: "periodic" i: 1 type: INT

  • +
+

Inputs

+
    +
  • size (heterogeneous)T1: A scalar value indicating the length of the window.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: A Hamming window with length: size. The output has the shape: [size].

  • +
+

Type Constraints

+
    +
  • T1 tensor(int32), tensor(int64): Constrain the input size to int64_t.

  • +
  • T2 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain output types to numeric tensors.
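
For reference, window operators of this family follow the cosine-window formula w[n] = alpha - (1 - alpha) * cos(2 * pi * n / N), where N is size for a periodic window and size - 1 for a symmetric one. The sketch below uses the textbook Hamming coefficient 0.54; the exact coefficient baked into the ONNX function definition may differ slightly, so treat this as an illustration of the periodic/symmetric distinction rather than a bit-exact reference.

import numpy as np

def cosine_window(size, alpha, periodic=True):
    # periodic=True matches the operator default: compute size + 1 points, keep the first size.
    n = np.arange(size, dtype=np.float64)
    denom = size if periodic else size - 1
    return alpha - (1.0 - alpha) * np.cos(2.0 * np.pi * n / denom)

hamming = cosine_window(16, alpha=0.54)  # Hamming-style window
hann = cosine_window(16, alpha=0.5)      # Hann window (see OnnxHannWindow below)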


OnnxHammingWindow_17#

class skl2onnx.algebra.onnx_ops.OnnxHammingWindow_17(*args, **kwargs)#

Version

Onnx name: HammingWindow

This version of the operator has been available since version 17.

+

Summary

+

Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106.

+

Attributes

+
    +
  • output_datatype: The data type of the output tensor. Strictly must be one of the values from DataType enum in TensorProto whose values correspond to T2. The default value is 1 = FLOAT. Default value is +name: "output_datatype" i: 1 type: INT

  • +
  • periodic: If 1, returns a window to be used as periodic function. If 0, return a symmetric window. When ‘periodic’ is specified, hann computes a window of length size + 1 and returns the first size points. The default value is 1. Default value is +name: "periodic" i: 1 type: INT

  • +
+

Inputs

+
    +
  • size (heterogeneous)T1: A scalar value indicating the length of the window.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: A Hamming window with length: size. The output has the shape: [size].

  • +
+

Type Constraints

+
    +
  • T1 tensor(int32), tensor(int64): Constrain the input size to int64_t.

  • +
  • T2 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain output types to numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxHannWindow#

class skl2onnx.algebra.onnx_ops.OnnxHannWindow(*args, **kwargs)#

Version

Onnx name: HannWindow

This version of the operator has been available since version 17.

+

Summary

+

Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106.

+

Attributes

+
    +
  • output_datatype: The data type of the output tensor. Strictly must be one of the values from DataType enum in TensorProto whose values correspond to T2. The default value is 1 = FLOAT. Default value is +name: "output_datatype" i: 1 type: INT

  • +
  • periodic: If 1, returns a window to be used as periodic function. If 0, return a symmetric window. When ‘periodic’ is specified, hann computes a window of length size + 1 and returns the first size points. The default value is 1. Default value is +name: "periodic" i: 1 type: INT

  • +
+

Inputs

+
    +
  • size (heterogeneous)T1: A scalar value indicating the length of the window.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: A Hann window with length: size. The output has the shape: [size].

  • +
+

Type Constraints

+
    +
  • T1 tensor(int32), tensor(int64): Constrain the input size to int64_t.

  • +
  • T2 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain output types to numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxHannWindow_17#

class skl2onnx.algebra.onnx_ops.OnnxHannWindow_17(*args, **kwargs)#

Version

Onnx name: HannWindow

This version of the operator has been available since version 17.

+

Summary

+

Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106.

+

Attributes

+
    +
  • output_datatype: The data type of the output tensor. Strictly must be one of the values from DataType enum in TensorProto whose values correspond to T2. The default value is 1 = FLOAT. Default value is +name: "output_datatype" i: 1 type: INT

  • +
  • periodic: If 1, returns a window to be used as periodic function. If 0, return a symmetric window. When ‘periodic’ is specified, hann computes a window of length size + 1 and returns the first size points. The default value is 1. Default value is +name: "periodic" i: 1 type: INT

  • +
+

Inputs

+
    +
  • size (heterogeneous)T1: A scalar value indicating the length of the window.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: A Hann window with length: size. The output has the shape: [size].

  • +
+

Type Constraints

+
    +
  • T1 tensor(int32), tensor(int64): Constrain the input size to int64_t.

  • +
  • T2 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain output types to numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxHardSigmoid#

class skl2onnx.algebra.onnx_ops.OnnxHardSigmoid(*args, **kwargs)#

Version

Onnx name: HardSigmoid

This version of the operator has been available since version 6.

Summary

HardSigmoid takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)), is applied to the tensor elementwise.

Attributes

  • alpha: Value of alpha. Default value is name: "alpha" f: 0.20000000298023224 type: FLOAT
  • beta: Value of beta. Default value is name: "beta" f: 0.5 type: FLOAT

Inputs

  • X (heterogeneous) T: Input tensor

Outputs

  • Y (heterogeneous) T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
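
A minimal sketch of building and evaluating a single HardSigmoid node, assuming onnxruntime is installed; the chosen op_version and the names 'X'/'Y' are arbitrary. The expected values follow directly from y = max(0, min(1, 0.2 * x + 0.5)).

import numpy as np
from onnxruntime import InferenceSession
from skl2onnx.algebra.onnx_ops import OnnxHardSigmoid

x = np.array([[-10., -1., 0., 1., 10.]], dtype=np.float32)

# Y = max(0, min(1, alpha * X + beta)) with the default alpha=0.2, beta=0.5.
op = OnnxHardSigmoid('X', op_version=13, output_names=['Y'])
onx = op.to_onnx({'X': x})

sess = InferenceSession(onx.SerializeToString())
print(sess.run(None, {'X': x})[0])
# approximately [[0.  0.3  0.5  0.7  1. ]]

# The same computation with numpy, for comparison.
print(np.clip(0.2 * x + 0.5, 0.0, 1.0))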

OnnxHardSigmoid_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxHardSigmoid_1(*args, **kwargs)#
+

Version

+

Onnx name: HardSigmoid

+

This version of the operator has been available since +version 1.

+

Summary

+

HardSigmoid takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)), +is applied to the tensor elementwise.

+

Attributes

+
    +
  • alpha: Value of alpha default to 0.2 Default value is +name: "alpha" f: 0.20000000298023224 type: FLOAT

  • +
  • beta: Value of beta default to 0.5 Default value is +name: "beta" f: 0.5 type: FLOAT

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxHardSigmoid_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxHardSigmoid_6(*args, **kwargs)#
+

Version

+

Onnx name: HardSigmoid

+

This version of the operator has been available since +version 6.

+

Summary

+

HardSigmoid takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)), +is applied to the tensor elementwise.

+

Attributes

+
    +
  • alpha: Value of alpha. Default value is +name: "alpha" f: 0.20000000298023224 type: FLOAT

  • +
  • beta: Value of beta. Default value is +name: "beta" f: 0.5 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxHardSwish#

class skl2onnx.algebra.onnx_ops.OnnxHardSwish(*args, **kwargs)#

Version

Onnx name: HardSwish

This version of the operator has been available since version 14.

Summary

HardSwish takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the HardSwish function, y = x * max(0, min(1, alpha * x + beta)) = x * HardSigmoid<alpha, beta>(x), where alpha = 1/6 and beta = 0.5, is applied to the tensor elementwise.

Inputs

  • X (heterogeneous) T: Input tensor

Outputs

  • Y (heterogeneous) T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
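
Since alpha and beta are fixed to 1/6 and 0.5, the elementwise behaviour can be checked with a few lines of numpy. This is only an illustrative sketch of the formula above, not tied to any particular runtime.

import numpy as np

def hard_swish(x):
    # y = x * max(0, min(1, x / 6 + 0.5)); alpha = 1/6 and beta = 0.5 are fixed.
    return x * np.clip(x / 6.0 + 0.5, 0.0, 1.0)

x = np.array([-4.0, -1.0, 0.0, 1.0, 4.0], dtype=np.float32)
print(hard_swish(x))  # [-0.      -0.33333  0.       0.66667  4.     ]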

OnnxHardSwish_14#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxHardSwish_14(*args, **kwargs)#
+

Version

+

Onnx name: HardSwish

+

This version of the operator has been available since +version 14.

+

Summary

+

HardSwish takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where +the HardSwish function, y = x * max(0, min(1, alpha * x + beta)) = x * HardSigmoid<alpha, beta>(x), +where alpha = 1/6 and beta = 0.5, is applied to the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxHardmax#

class skl2onnx.algebra.onnx_ops.OnnxHardmax(*args, **kwargs)#

Version

Onnx name: Hardmax

This version of the operator has been available since version 13.

Summary

The operator computes the hardmax values for the given input:

Hardmax(element in input, axis) = 1 if the element is the first maximum value along the specified axis, 0 otherwise

The "axis" attribute indicates the dimension along which Hardmax will be performed. The output tensor has the same shape and contains the Hardmax values of the corresponding input.

Attributes

  • axis: Describes the dimension Hardmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: -1 type: INT

Inputs

  • input (heterogeneous) T: The input tensor of rank >= axis.

Outputs

  • output (heterogeneous) T: The output values with the same shape as the input tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.
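
A short numpy sketch of the definition above: 1 at the first maximum along the chosen axis, 0 everywhere else. Illustrative only.

import numpy as np

def hardmax(x, axis=-1):
    # One-hot of the first maximum along `axis` (argmax returns the first one).
    out = np.zeros_like(x)
    idx = np.argmax(x, axis=axis)
    np.put_along_axis(out, np.expand_dims(idx, axis), 1.0, axis)
    return out

x = np.array([[1.0, 3.0, 3.0],
              [2.0, 0.5, 1.0]], dtype=np.float32)
print(hardmax(x))
# [[0. 1. 0.]
#  [1. 0. 0.]]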

OnnxHardmax_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxHardmax_1(*args, **kwargs)#
+

Version

+

Onnx name: Hardmax

+

This version of the operator has been available since +version 1.

+

Summary

+
+
The operator computes the hardmax (1 for the first maximum value, and 0 for all others) values for each layer in the batch

of the given input. The input is a 2-D tensor (Tensor<float>) of size

+
+
+

(batch_size x input_feature_dimensions). The output tensor has the same shape +and contains the hardmax values of the corresponding input.

+

Input does not need to explicitly be a 2D vector; rather, it will be +coerced into one. For an arbitrary n-dimensional tensor +input in [a_0, a_1, …, a_{k-1}, a_k, …, a_{n-1}] and k is +the axis provided, then input will be coerced into a 2-dimensional tensor with +dimensions [a_0 * … * a_{k-1}, a_k * … * a_{n-1}]. For the default +case where axis=1, this means the input tensor will be coerced into a 2D tensor +of dimensions [a_0, a_1 * … * a_{n-1}], where a_0 is often the batch size. +In this situation, we must have a_0 = N and a_1 * … * a_{n-1} = D. +Each of these dimensions must be matched correctly, or else the operator +will throw errors.

+

Attributes

+
    +
  • axis: Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size Default value is +name: "axis" i: 1 type: INT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: The input tensor that’s coerced into a 2D matrix of size (NxD) as described above.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The output values with the same shape as input tensor (the original size without coercion).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxHardmax_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxHardmax_11(*args, **kwargs)#
+

Version

+

Onnx name: Hardmax

+

This version of the operator has been available since +version 11.

+

Summary

+
+
The operator computes the hardmax (1 for the first maximum value, and 0 for all others) values for each layer in the batch

of the given input.

+
+
+

The input does not need to explicitly be a 2D vector; rather, it will be +coerced into one. For an arbitrary n-dimensional tensor +input in [a_0, a_1, …, a_{k-1}, a_k, …, a_{n-1}] and k is +the axis provided, then input will be coerced into a 2-dimensional tensor with +dimensions [a_0 * … * a_{k-1}, a_k * … * a_{n-1}]. For the default +case where axis=1, this means the input tensor will be coerced into a 2D tensor +of dimensions [a_0, a_1 * … * a_{n-1}], where a_0 is often the batch size. +In this situation, we must have a_0 = N and a_1 * … * a_{n-1} = D. +Each of these dimensions must be matched correctly, or else the operator +will throw errors. The output tensor has the same shape +and contains the hardmax values of the corresponding input.

+

Attributes

+
    +
  • axis: Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is +name: "axis" i: 1 type: INT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: The input tensor that’s coerced into a 2D matrix of size (NxD) as described above.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The output values with the same shape as input tensor (the original size without coercion).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxHardmax_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxHardmax_13(*args, **kwargs)#
+

Version

+

Onnx name: Hardmax

+

This version of the operator has been available since +version 13.

+

Summary

+

The operator computes the hardmax values for the given input:

+
+

Hardmax(element in input, axis) = 1 if the element is the first maximum value along the specified axis, 0 otherwise

+
+

The “axis” attribute indicates the dimension along which Hardmax +will be performed. The output tensor has the same shape +and contains the Hardmax values of the corresponding input.

+

Attributes

+
    +
  • axis:

  • +
+

Describes the dimension Hardmax will be performed on. +Negative value means counting dimensions +from the back. Accepted range is [-r, r-1] where r = rank(input).

+
+
+
Default value is

name: "axis" i: -1 type: INT

+
+
+
+

Inputs

+
    +
  • input (heterogeneous)T: The input tensor of rank >= axis.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The output values with the same shape as the input tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxIdentity#

class skl2onnx.algebra.onnx_ops.OnnxIdentity(*args, **kwargs)#

Version

Onnx name: Identity

This version of the operator has been available since version 16.

Summary

Identity operator

Inputs

  • input (heterogeneous) V: Input tensor

Outputs

  • output (heterogeneous) V: Tensor to copy input into.

Type Constraints

  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)), optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrain input and output types to all tensor, sequence, and optional types.

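Although the operator itself only copies its input, the class is convenient when composing algebra nodes, for example to give the final graph output a fixed name. A minimal sketch, assuming OnnxAbs is available as in the other entries of this page; the names and op_version are arbitrary.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxAbs, OnnxIdentity

x = np.array([[-1.0, 2.0]], dtype=np.float32)

# Wrap the result of another algebra node so the graph output gets a stable name.
chain = OnnxIdentity(OnnxAbs('X', op_version=15), op_version=15, output_names=['Y'])
onx = chain.to_onnx({'X': x})
print([o.name for o in onx.graph.output])  # ['Y']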

OnnxIdentity_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxIdentity_1(*args, **kwargs)#
+

Version

+

Onnx name: Identity

+

This version of the operator has been available since +version 1.

+

Summary

+

Identity operator

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor to copy input into.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxIdentity_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxIdentity_13(*args, **kwargs)#
+

Version

+

Onnx name: Identity

+

This version of the operator has been available since +version 13.

+

Summary

+

Identity operator

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor to copy input into.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxIdentity_14#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxIdentity_14(*args, **kwargs)#
+

Version

+

Onnx name: Identity

+

This version of the operator has been available since +version 14.

+

Summary

+

Identity operator

+

Inputs

+
    +
  • input (heterogeneous)V: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)V: Tensor to copy input into.

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input and output types to all tensor and sequence types.

  • +
+
+ +
+
+
+
+

OnnxIdentity_16#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxIdentity_16(*args, **kwargs)#
+

Version

+

Onnx name: Identity

+

This version of the operator has been available since +version 16.

+

Summary

+

Identity operator

+

Inputs

+
    +
  • input (heterogeneous)V: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)V: Tensor to copy input into.

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)), optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrain input and output types to all tensor, sequence, and optional types.

  • +
+
+ +
+
+
+
+

OnnxIf#

class skl2onnx.algebra.onnx_ops.OnnxIf(*args, **kwargs)#

Version

Onnx name: If

This version of the operator has been available since version 16.

Summary

If conditional

Attributes

  • else_branch: Graph to run if condition is false.
  • then_branch: Graph to run if condition is true.

Inputs

  • cond (heterogeneous) B: Condition for the if

Outputs

Between 1 and 2147483647 outputs.

  • outputs (variadic)V: Values that are live-out to the enclosing scope. The return values in the then_branch and else_branch must be of the same data type. The then_branch and else_branch may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the first output of then_branch is typed float tensor with shape [2] and the first output of else_branch is another float tensor with shape [3], If’s first output should have (a) no shape set, or (b) a shape of rank 1 with neither dim_value nor dim_param set, or (c) a shape of rank 1 with a unique dim_param. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.

Type Constraints

  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(bfloat16)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)), optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(bfloat16))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(bfloat16)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types

  • B tensor(bool): Only bool

OnnxIf_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxIf_1(*args, **kwargs)#
+

Version

+

Onnx name: If

+

This version of the operator has been available since +version 1.

+

Summary

+

If conditional

+

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • cond (heterogeneous)B: Condition for the if

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic)V: Values that are live-out to the enclosing scope. The return values in the then_branch and else_branch must be of the same shape and same data type.

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

  • +
  • B tensor(bool): Only bool

  • +
+
+ +
+
+
+
+

OnnxIf_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxIf_11(*args, **kwargs)#
+

Version

+

Onnx name: If

+

This version of the operator has been available since +version 11.

+

Summary

+

If conditional

+

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • cond (heterogeneous)B: Condition for the if

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic)V: Values that are live-out to the enclosing scope. The return values in the then_branch and else_branch must be of the same data type. The then_branch and else_branch may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the first output of then_branch is typed float tensor with shape [2] and the first output of else_branch is another float tensor with shape [3], If’s first output should have (a) no shape set, or (b) a shape of rank 1 with neither dim_value nor dim_param set, or (c) a shape of rank 1 with a unique dim_param. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

  • +
  • B tensor(bool): Only bool

  • +
+
+ +
+
+
+
+

OnnxIf_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxIf_13(*args, **kwargs)#
+

Version

+

Onnx name: If

+

This version of the operator has been available since +version 13.

+

Summary

+

If conditional

+

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • cond (heterogeneous)B: Condition for the if

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic)V: Values that are live-out to the enclosing scope. The return values in the then_branch and else_branch must be of the same data type. The then_branch and else_branch may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the first output of then_branch is typed float tensor with shape [2] and the first output of else_branch is another float tensor with shape [3], If’s first output should have (a) no shape set, or (b) a shape of rank 1 with neither dim_value nor dim_param set, or (c) a shape of rank 1 with a unique dim_param. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): All Tensor and Sequence types

  • +
  • B tensor(bool): Only bool

  • +
+
+ +
+
+
+
+

OnnxIf_16#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxIf_16(*args, **kwargs)#
+

Version

+

Onnx name: If

+

This version of the operator has been available since +version 16.

+

Summary

+

If conditional

+

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • cond (heterogeneous)B: Condition for the if

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic)V: Values that are live-out to the enclosing scope. The return values in the then_branch and else_branch must be of the same data type. The then_branch and else_branch may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the first output of then_branch is typed float tensor with shape [2] and the first output of else_branch is another float tensor with shape [3], If’s first output should have (a) no shape set, or (b) a shape of rank 1 with neither dim_value nor dim_param set, or (c) a shape of rank 1 with a unique dim_param. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(bfloat16)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)), optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(bfloat16))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(bfloat16)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types

  • +
  • B tensor(bool): Only bool

  • +
+
+ +
+
+
+
+

OnnxImputer#

class skl2onnx.algebra.onnx_ops.OnnxImputer(*args, **kwargs)#

Version

Onnx name: Imputer

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Replaces inputs that equal one value with another, leaving all other elements alone.

This operator is typically used to replace missing values in situations where they have a canonical representation, such as -1, 0, NaN, or some extreme value.

One and only one of imputed_value_floats or imputed_value_int64s should be defined – floats if the input tensor holds floats, integers if the input tensor holds integers. The imputed values must all fit within the width of the tensor element type. One and only one of the replaced_value_float or replaced_value_int64 should be defined, which one depends on whether floats or integers are being processed.

The imputed_value attribute length can be 1 element, or it can have one element per input feature. In other words, if the input tensor has the shape [*,F], then the length of the attribute array may be 1 or F. If it is 1, then it is broadcast along the last dimension and applied to each feature.

Attributes

  • replaced_value_float: A value that needs replacing. Default value is name: "replaced_value_float" f: 0.0 type: FLOAT
  • replaced_value_int64: A value that needs replacing. Default value is name: "replaced_value_int64" i: 0 type: INT

Inputs

  • X (heterogeneous) T: Data to be processed.

Outputs

  • Y (heterogeneous) T: Imputed output data

Type Constraints

  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type, either [N,C] or [C]. The output type will be of the same tensor type and shape.
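
A numpy sketch of the replacement rule described above, with the NaN case handled separately since NaN never compares equal to itself. The helper name and its arguments are illustrative only.

import numpy as np

def impute(x, replaced_value, imputed_values):
    # Replace every element equal to `replaced_value` (or NaN) by the imputed
    # value of its feature; `imputed_values` has length 1 or x.shape[-1].
    imputed = np.broadcast_to(np.asarray(imputed_values, dtype=x.dtype), x.shape)
    mask = np.isnan(x) if np.isnan(replaced_value) else (x == replaced_value)
    return np.where(mask, imputed, x)

x = np.array([[1.0, np.nan, 3.0],
              [np.nan, 5.0, 6.0]], dtype=np.float32)
print(impute(x, np.nan, [0.1, 0.2, 0.3]))
# [[1.  0.2 3. ]
#  [0.1 5.  6. ]]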

OnnxImputer_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxImputer_1(*args, **kwargs)#
+

Version

+

Onnx name: Imputer

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Replaces inputs that equal one value with another, leaving all other elements alone.

+

This operator is typically used to replace missing values in situations where they have a canonical +representation, such as -1, 0, NaN, or some extreme value.

+

One and only one of imputed_value_floats or imputed_value_int64s should be defined – floats if the input tensor +holds floats, integers if the input tensor holds integers. The imputed values must all fit within the +width of the tensor element type. One and only one of the replaced_value_float or replaced_value_int64 should be defined, +which one depends on whether floats or integers are being processed.

+

The imputed_value attribute length can be 1 element, or it can have one element per input feature. +In other words, if the input tensor has the shape [*,F], then the length of the attribute array may be 1 or F. If it is 1, then it is broadcast along the last dimension and applied to each feature.

+

Attributes

+
    +
  • +
  • +
  • replaced_value_float: A value that needs replacing. Default value is +name: "replaced_value_float" f: 0.0 type: FLOAT

  • +
  • replaced_value_int64: A value that needs replacing. Default value is +name: "replaced_value_int64" i: 0 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Data to be processed.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Imputed output data

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type, either [N,C] or [C]. The output type will be of the same tensor type and shape.

  • +
+
+ +
+
+
+
+

OnnxInstanceNormalization#

class skl2onnx.algebra.onnx_ops.OnnxInstanceNormalization(*args, **kwargs)#

Version

Onnx name: InstanceNormalization

This version of the operator has been available since version 6.

Summary

Carries out instance normalization as described in the paper https://arxiv.org/abs/1607.08022.

y = scale * (x - mean) / sqrt(variance + epsilon) + B, where mean and variance are computed per instance per channel.

Attributes

  • epsilon: The epsilon value to use to avoid division by zero. Default value is name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

Inputs

  • input (heterogeneous) T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.
  • scale (heterogeneous) T: The input 1-dimensional scale tensor of size C.
  • B (heterogeneous) T: The input 1-dimensional bias tensor of size C.

Outputs

  • output (heterogeneous) T: The output tensor of the same shape as input.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
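
A numpy sketch of the formula above, computing the mean and variance per instance and per channel over the remaining (spatial) axes. Illustrative only.

import numpy as np

def instance_norm(x, scale, bias, epsilon=1e-5):
    # x has shape (N, C, ...); statistics are taken over all axes after C.
    axes = tuple(range(2, x.ndim))
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    shape = (1, -1) + (1,) * (x.ndim - 2)  # broadcast scale/bias over N and spatial dims
    return scale.reshape(shape) * (x - mean) / np.sqrt(var + epsilon) + bias.reshape(shape)

x = np.random.randn(2, 3, 4, 4).astype(np.float32)
y = instance_norm(x, scale=np.ones(3, np.float32), bias=np.zeros(3, np.float32))
print(y.mean(axis=(2, 3)))  # close to 0 for every (instance, channel) pair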

OnnxInstanceNormalization_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxInstanceNormalization_1(*args, **kwargs)#
+

Version

+

Onnx name: InstanceNormalization

+

This version of the operator has been available since +version 1.

+

Summary

+

Carries out instance normalization as described in the paper +https://arxiv.org/abs/1607.08022.

+

y = scale * (x - mean) / sqrt(variance + epsilon) + B, +where mean and variance are computed per instance per channel.

+

Attributes

+
    +
  • +
  • epsilon: The epsilon value to use to avoid division by zero, default is 1e-5f. Default value is +name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: The input 4-dimensional tensor of shape NCHW.

  • +
  • scale (heterogeneous)T: The input 1-dimensional scale tensor of size C.

  • +
  • B (heterogeneous)T: The input 1-dimensional bias tensor of size C.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The output 4-dimensional tensor of the same shape as input.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxInstanceNormalization_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxInstanceNormalization_6(*args, **kwargs)#
+

Version

+

Onnx name: InstanceNormalization

+

This version of the operator has been available since +version 6.

+

Summary

+

Carries out instance normalization as described in the paper +https://arxiv.org/abs/1607.08022.

+

y = scale * (x - mean) / sqrt(variance + epsilon) + B, +where mean and variance are computed per instance per channel.

+

Attributes

+
    +
  • epsilon: The epsilon value to use to avoid division by zero. Default value is +name: "epsilon" f: 9.999999747378752e-06 type: FLOAT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
  • scale (heterogeneous)T: The input 1-dimensional scale tensor of size C.

  • +
  • B (heterogeneous)T: The input 1-dimensional bias tensor of size C.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The output tensor of the same shape as input.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxIsInf#

class skl2onnx.algebra.onnx_ops.OnnxIsInf(*args, **kwargs)#

Version

Onnx name: IsInf

This version of the operator has been available since version 10.

Summary

Map infinity to true and other values to false.

Attributes

  • detect_negative: (Optional) Whether to map negative infinity to true. Defaults to 1 so that negative infinity induces true. Set this attribute to 0 if negative infinity should be mapped to false. Default value is name: "detect_negative" i: 1 type: INT
  • detect_positive: (Optional) Whether to map positive infinity to true. Defaults to 1 so that positive infinity induces true. Set this attribute to 0 if positive infinity should be mapped to false. Default value is name: "detect_positive" i: 1 type: INT

Inputs

  • X (heterogeneous) T1: input

Outputs

  • Y (heterogeneous) T2: output

Type Constraints

  • T1 tensor(float), tensor(double): Constrain input types to float tensors.
  • T2 tensor(bool): Constrain output types to boolean tensors.
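
A numpy sketch of how the two detect_* attributes restrict which sign of infinity is reported. Illustrative only.

import numpy as np

def is_inf(x, detect_negative=1, detect_positive=1):
    pos = np.isposinf(x) if detect_positive else np.zeros(x.shape, bool)
    neg = np.isneginf(x) if detect_negative else np.zeros(x.shape, bool)
    return pos | neg

x = np.array([-np.inf, 0.0, np.nan, np.inf], dtype=np.float32)
print(is_inf(x))                      # [ True False False  True]
print(is_inf(x, detect_negative=0))   # [False False False  True]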

OnnxIsInf_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxIsInf_10(*args, **kwargs)#
+

Version

+

Onnx name: IsInf

+

This version of the operator has been available since +version 10.

+

Summary

+

Map infinity to true and other values to false.

+

Attributes

+
    +
  • detect_negative: (Optional) Whether map negative infinity to true. Default to 1 so that negative infinity induces true. Set this attribute to 0 if negative infinity should be mapped to false. Default value is +name: "detect_negative" i: 1 type: INT

  • +
  • detect_positive: (Optional) Whether map positive infinity to true. Default to 1 so that positive infinity induces true. Set this attribute to 0 if positive infinity should be mapped to false. Default value is +name: "detect_positive" i: 1 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: input

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: output

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(double): Constrain input types to float tensors.

  • +
  • T2 tensor(bool): Constrain output types to boolean tensors.

  • +
+
+ +
+
+
+
+

OnnxIsNaN#

class skl2onnx.algebra.onnx_ops.OnnxIsNaN(*args, **kwargs)#

Version

Onnx name: IsNaN

This version of the operator has been available since version 13.

Summary

Returns which elements of the input are NaN.

Inputs

  • X (heterogeneous) T1: input

Outputs

  • Y (heterogeneous) T2: output

Type Constraints

  • T1 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to float tensors.
  • T2 tensor(bool): Constrain output types to boolean tensors.

OnnxIsNaN_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxIsNaN_13(*args, **kwargs)#
+

Version

+

Onnx name: IsNaN

+

This version of the operator has been available since +version 13.

+

Summary

+

Returns which elements of the input are NaN.

+

Inputs

+
    +
  • X (heterogeneous)T1: input

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: output

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to float tensors.

  • +
  • T2 tensor(bool): Constrain output types to boolean tensors.

  • +
+
+ +
+
+
+
+

OnnxIsNaN_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxIsNaN_9(*args, **kwargs)#
+

Version

+

Onnx name: IsNaN

+

This version of the operator has been available since +version 9.

+

Summary

+

Returns which elements of the input are NaN.

+

Inputs

+
    +
  • X (heterogeneous)T1: input

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: output

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • +
  • T2 tensor(bool): Constrain output types to boolean tensors.

  • +
+
+ +
+
+
+
+

OnnxLRN#

class skl2onnx.algebra.onnx_ops.OnnxLRN(*args, **kwargs)#

Version

Onnx name: LRN

This version of the operator has been available since version 13.

Summary

Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf). It normalizes over local input regions. The local region is defined across the channels. For an element X[n, c, d1, …, dk] in a tensor of shape (N x C x D1 x D2, …, Dk), its region is {X[n, i, d1, …, dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}.

square_sum[n, c, d1, …, dk] = sum(X[n, i, d1, …, dk] ^ 2), where max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)).

Y[n, c, d1, …, dk] = X[n, c, d1, …, dk] / (bias + alpha / size * square_sum[n, c, d1, …, dk]) ^ beta

Attributes

  • alpha: Scaling parameter. Default value is name: "alpha" f: 9.999999747378752e-05 type: FLOAT
  • beta: The exponent. Default value is name: "beta" f: 0.75 type: FLOAT
  • bias: Default value is name: "bias" f: 1.0 type: FLOAT

Inputs

  • X (heterogeneous) T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

Outputs

  • Y (heterogeneous) T: Output tensor, which has the shape and type as input tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.
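
A numpy sketch of the square_sum window and normalization formula above for an (N, C, …) tensor, written for clarity rather than speed. Illustrative only.

import numpy as np

def lrn(x, size, alpha=1e-4, beta=0.75, bias=1.0):
    # For each channel c, sum the squares of the channels inside the window
    # [c - floor((size-1)/2), c + ceil((size-1)/2)] clipped to [0, C-1].
    c = x.shape[1]
    square_sum = np.zeros_like(x)
    for i in range(c):
        lo = max(0, i - (size - 1) // 2)
        hi = min(c - 1, i + int(np.ceil((size - 1) / 2)))
        square_sum[:, i] = (x[:, lo:hi + 1] ** 2).sum(axis=1)
    return x / (bias + alpha / size * square_sum) ** beta

x = np.random.randn(1, 5, 3, 3).astype(np.float32)
print(lrn(x, size=3).shape)  # (1, 5, 3, 3)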

OnnxLRN_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLRN_1(*args, **kwargs)#
+

Version

+

Onnx name: LRN

+

This version of the operator has been available since +version 1.

+

Summary

+

Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf). +It normalizes over local input regions. +The local region is defined across the channels. For an element X[n, c, d1, …, dk] in a tensor +of shape (N x C x D1 x D2, …, Dk), its region is +{X[n, i, d1, …, dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}.

+

square_sum[n, c, d1, …, dk] = sum(X[n, i, d1, …, dk] ^ 2), +where max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)).

+

Y[n, c, d1, …, dk] = X[n, c, d1, …, dk] / (bias + alpha / size * square_sum[n, c, d1, …, dk] ) ^ beta

+

Attributes

+
    +
  • alpha: Scaling parameter. Default value is +name: "alpha" f: 9.999999747378752e-05 type: FLOAT

  • +
  • beta: The exponent. Default value is +name: "beta" f: 0.75 type: FLOAT

  • +
  • bias: Default value is +name: "bias" f: 1.0 type: FLOAT

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor, which has the shape and type as input tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxLRN_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLRN_13(*args, **kwargs)#
+

Version

+

Onnx name: LRN

+

This version of the operator has been available since +version 13.

+

Summary

+

Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf). +It normalizes over local input regions. +The local region is defined across the channels. For an element X[n, c, d1, …, dk] in a tensor +of shape (N x C x D1 x D2, …, Dk), its region is +{X[n, i, d1, …, dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}.

+

square_sum[n, c, d1, …, dk] = sum(X[n, i, d1, …, dk] ^ 2), +where max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)).

+

Y[n, c, d1, …, dk] = X[n, c, d1, …, dk] / (bias + alpha / size * square_sum[n, c, d1, …, dk] ) ^ beta

+

Attributes

+
    +
  • alpha: Scaling parameter. Default value is +name: "alpha" f: 9.999999747378752e-05 type: FLOAT

  • +
  • beta: The exponent. Default value is +name: "beta" f: 0.75 type: FLOAT

  • +
  • bias: Default value is +name: "bias" f: 1.0 type: FLOAT

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor, which has the shape and type as input tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxLSTM#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLSTM(*args, **kwargs)#
+

Version

+

Onnx name: LSTM

+

This version of the operator has been available since +version 14.

+

Summary

+

Computes a one-layer LSTM. This operator is usually supported via some custom implementation such as CuDNN.

+

Notations:

+
    +
  • X - input tensor

  • +
  • i - input gate

  • +
  • o - output gate

  • +
  • f - forget gate

  • +
  • c - cell gate

  • +
  • t - time step (t-1 means previous time step)

  • +
  • W[iofc] - W parameter weight matrix for input, output, forget, and cell gates

  • +
  • R[iofc] - R recurrence weight matrix for input, output, forget, and cell gates

  • +
  • Wb[iofc] - W bias vectors for input, output, forget, and cell gates

  • +
  • Rb[iofc] - R bias vectors for input, output, forget, and cell gates

  • +
  • P[iof] - P peephole weight vector for input, output, and forget gates

  • +
  • WB[iofc] - W parameter weight matrix for backward input, output, forget, and cell gates

  • +
  • RB[iofc] - R recurrence weight matrix for backward input, output, forget, and cell gates

  • +
  • WBb[iofc] - W bias vectors for backward input, output, forget, and cell gates

  • +
  • RBb[iofc] - R bias vectors for backward input, output, forget, and cell gates

  • +
  • PB[iof] - P peephole weight vector for backward input, output, and forget gates

  • +
  • H - Hidden state

  • +
  • num_directions - 2 if direction == bidirectional else 1

  • +
+

Activation functions:

+
    +
  • Relu(x) - max(0, x)

  • +
  • Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

  • +
  • Sigmoid(x) - 1/(1 + e^{-x})

  • +
+

NOTE: Below are optional

+
    +
  • Affine(x) - alpha*x + beta

  • +
  • LeakyRelu(x) - x if x >= 0 else alpha * x

  • +
  • ThresholdedRelu(x) - x if x >= alpha else 0

  • +
  • ScaledTanh(x) - alpha*Tanh(beta*x)

  • +
  • HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

  • +
  • Elu(x) - x if x >= 0 else alpha*(e^x - 1)

  • +
  • Softsign(x) - x/(1 + |x|)

  • +
  • Softplus(x) - log(1 + e^x)

  • +
+

Equations (Default: f=Sigmoid, g=Tanh, h=Tanh):

+
    +
  • it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)

  • +
  • ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)

  • +
  • ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)

  • +
  • Ct = ft (.) Ct-1 + it (.) ct

  • +
  • ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)

  • +
  • Ht = ot (.) h(Ct)

  • +
+

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is +name: "direction" s: "forward" type: STRING

  • +
  • +
  • input_forget: Couple the input and forget gates if 1. Default value is +name: "input_forget" i: 0 type: INT

  • +
  • layout: The shape format of inputs X, initial_h, initial_c and outputs Y, Y_h, Y_c. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape = [batch_size, num_directions, hidden_size]. Default value is +name: "layout" i: 0 type: INT

  • +
+

Inputs

+

Between 3 and 8 inputs.

+
    +
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • +
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[iofc] and WB[iofc] (if bidirectional) along dimension 0. The tensor has shape [num_directions, 4*hidden_size, input_size].

  • +
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[iofc] and RB[iofc] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 4*hidden_size, hidden_size].

  • +
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wb[iofc], Rb[iofc]], and [WBb[iofc], RBb[iofc]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 8*hidden_size]. Optional: If not specified - assumed to be 0.

  • +
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • +
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • +
  • initial_c (optional, heterogeneous)T: Optional initial value of the cell. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • +
  • P (optional, heterogeneous)T: The weight tensor for peepholes. Concatenation of P[iof] and PB[iof] (if bidirectional) along dimension 0. It has shape [num_directions, 3*hidden_size]. Optional: If not specified - assumed to be 0.

  • +
+

Outputs

+

Between 0 and 3 outputs.

+
    +
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • +
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • +
  • Y_c (optional, heterogeneous)T: The last output value of the cell. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • +
+
+ +
+
+
+
+
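
A numpy sketch of a single forward time step of the default equations above (f=Sigmoid, g=Tanh, h=Tanh), for one direction and without peepholes, to make the [iofc] weight layout concrete. Illustrative only; shapes and names follow the input descriptions above.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(x_t, h_prev, c_prev, W, R, Wb, Rb):
    # W: (4*hidden_size, input_size), R: (4*hidden_size, hidden_size),
    # both stacked in [i, o, f, c] gate order; Wb, Rb: (4*hidden_size,).
    gates = x_t @ W.T + h_prev @ R.T + Wb + Rb
    i, o, f, c = np.split(gates, 4, axis=-1)
    i, o, f = sigmoid(i), sigmoid(o), sigmoid(f)
    c_t = f * c_prev + i * np.tanh(c)   # Ct = ft (.) Ct-1 + it (.) ct
    h_t = o * np.tanh(c_t)              # Ht = ot (.) h(Ct)
    return h_t, c_t

batch, input_size, hidden_size = 2, 3, 4
rng = np.random.default_rng(0)
h = np.zeros((batch, hidden_size))
c = np.zeros((batch, hidden_size))
W = rng.normal(size=(4 * hidden_size, input_size))
R = rng.normal(size=(4 * hidden_size, hidden_size))
Wb = Rb = np.zeros(4 * hidden_size)
h, c = lstm_step(rng.normal(size=(batch, input_size)), h, c, W, R, Wb, Rb)
print(h.shape, c.shape)  # (2, 4) (2, 4)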

OnnxLSTM_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLSTM_1(*args, **kwargs)#
+

Version

+

Onnx name: LSTM

+

This version of the operator has been available since +version 1.

+

Summary

+

Computes a one-layer LSTM. This operator is usually supported via some custom implementation such as CuDNN.

+

Notations:

+

X - input tensor

+

i - input gate

+

o - output gate

+

f - forget gate

+

c - cell gate

+

t - time step (t-1 means previous time step)

+

W[iofc] - W parameter weight matrix for input, output, forget, and cell gates

+

R[iofc] - R recurrence weight matrix for input, output, forget, and cell gates

+

Wb[iofc] - W bias vectors for input, output, forget, and cell gates

+

Rb[iofc] - R bias vectors for input, output, forget, and cell gates

+

P[iof] - P peephole weight vector for input, output, and forget gates

+

WB[iofc] - W parameter weight matrix for backward input, output, forget, and cell gates

+

RB[iofc] - R recurrence weight matrix for backward input, output, forget, and cell gates

+

WBb[iofc] - W bias vectors for backward input, output, forget, and cell gates

+

RBb[iofc] - R bias vectors for backward input, output, forget, and cell gates

+

PB[iof] - P peephole weight vector for backward input, output, and forget gates

+

H - Hidden state

+

num_directions - 2 if direction == bidirectional else 1

+

Activation functions:

+
+

Relu(x) - max(0, x)

+

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

+

Sigmoid(x) - 1/(1 + e^{-x})

+

(NOTE: Below are optional)

+

Affine(x) - alpha*x + beta

+

LeakyRelu(x) - x if x >= 0 else alpha * x

+

ThresholdedRelu(x) - x if x >= alpha else 0

+

ScaledTanh(x) - alpha*Tanh(beta*x)

+

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

+

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

+

Softsign(x) - x/(1 + |x|)

+

Softplus(x) - log(1 + e^x)

+
+

Equations (Default: f=Sigmoid, g=Tanh, h=Tanh):

+
+
    +
  • it = f(Xt*(Wi^T) + Ht-1*Ri + Pi (.) Ct-1 + Wbi + Rbi)

  • +
  • ft = f(Xt*(Wf^T) + Ht-1*Rf + Pf (.) Ct-1 + Wbf + Rbf)

  • +
  • ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc + Rbc)

  • +
  • Ct = ft (.) Ct-1 + it (.) ct

  • +
  • ot = f(Xt*(Wo^T) + Ht-1*Ro + Po (.) Ct + Wbo + Rbo)

  • +
  • Ht = ot (.) h(Ct)

  • +
+
+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is +name: "direction" s: "forward" type: STRING

  • +
  • +
  • input_forget: Couple the input and forget gates if 1, default 0. Default value is +name: "input_forget" i: 0 type: INT

  • +
  • output_sequence: The sequence output for the hidden is optional if 0. Default 0. Default value is +name: "output_sequence" i: 0 type: INT

  • +
+

Inputs

+

Between 3 and 8 inputs.

+
    +
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • +
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[iofc] and WB[iofc] (if bidirectional) along dimension 0. The tensor has shape [num_directions, 4*hidden_size, input_size].

  • +
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[iofc] and RB[iofc] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 4*hidden_size, hidden_size].

  • +
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wb[iofc], Rb[iofc]], and [WBb[iofc], RBb[iofc]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 8*hidden_size]. Optional: If not specified - assumed to be 0.

  • +
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • +
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • +
  • initial_c (optional, heterogeneous)T: Optional initial value of the cell. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • +
  • P (optional, heterogeneous)T: The weight tensor for peepholes. Concatenation of P[iof] and PB[iof] (if bidirectional) along dimension 0. It has shape [num_directions, 3*hidden_size]. Optional: If not specified - assumed to be 0.

  • +
+

Outputs

+

Between 0 and 3 outputs.

+
    +
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size]. It is optional if output_sequence is 0.

  • +
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • +
  • Y_c (optional, heterogeneous)T: The last output value of the cell. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

  • +
+
+ +
+
+
+
+
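As a point of reference, the gate equations above can be reproduced directly in NumPy. The following is a minimal sketch of a single forward step for one direction, with zero peepholes; the function and variable names are illustrative only, and the recurrence weights are applied transposed as in the later opset definitions:

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def lstm_step(x_t, h_prev, c_prev, W, R, Wb, Rb):
        # W: [4*hidden_size, input_size], R: [4*hidden_size, hidden_size],
        # Wb, Rb: [4*hidden_size]; gate order is i, o, f, c as in the notation above.
        gates = x_t @ W.T + h_prev @ R.T + Wb + Rb
        i, o, f, c = np.split(gates, 4, axis=-1)
        i, o, f, c = sigmoid(i), sigmoid(o), sigmoid(f), np.tanh(c)
        C_t = f * c_prev + i * c
        H_t = o * np.tanh(C_t)
        return H_t, C_t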

OnnxLSTM_14#

class skl2onnx.algebra.onnx_ops.OnnxLSTM_14(*args, **kwargs)#

Version

Onnx name: LSTM

This version of the operator has been available since version 14.

Summary

Computes a one-layer LSTM. This operator is usually supported via some custom implementation such as CuDNN.

Notations:

  • X - input tensor
  • i - input gate
  • o - output gate
  • f - forget gate
  • c - cell gate
  • t - time step (t-1 means previous time step)
  • W[iofc] - W parameter weight matrix for input, output, forget, and cell gates
  • R[iofc] - R recurrence weight matrix for input, output, forget, and cell gates
  • Wb[iofc] - W bias vectors for input, output, forget, and cell gates
  • Rb[iofc] - R bias vectors for input, output, forget, and cell gates
  • P[iof] - P peephole weight vector for input, output, and forget gates
  • WB[iofc] - W parameter weight matrix for backward input, output, forget, and cell gates
  • RB[iofc] - R recurrence weight matrix for backward input, output, forget, and cell gates
  • WBb[iofc] - W bias vectors for backward input, output, forget, and cell gates
  • RBb[iofc] - R bias vectors for backward input, output, forget, and cell gates
  • PB[iof] - P peephole weight vector for backward input, output, and forget gates
  • H - Hidden state
  • num_directions - 2 if direction == bidirectional else 1

Activation functions:

  • Relu(x) - max(0, x)
  • Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})
  • Sigmoid(x) - 1/(1 + e^{-x})

NOTE: Below are optional

  • Affine(x) - alpha*x + beta
  • LeakyRelu(x) - x if x >= 0 else alpha * x
  • ThresholdedRelu(x) - x if x >= alpha else 0
  • ScaledTanh(x) - alpha*Tanh(beta*x)
  • HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)
  • Elu(x) - x if x >= 0 else alpha*(e^x - 1)
  • Softsign(x) - x/(1 + |x|)
  • Softplus(x) - log(1 + e^x)

Equations (Default: f=Sigmoid, g=Tanh, h=Tanh):

  • it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)
  • ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)
  • ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)
  • Ct = ft (.) Ct-1 + it (.) ct
  • ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)
  • Ht = ot (.) h(Ct)

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

Attributes

  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is name: "direction" s: "forward" type: STRING
  • input_forget: Couple the input and forget gates if 1. Default value is name: "input_forget" i: 0 type: INT
  • layout: The shape format of inputs X, initial_h, initial_c and outputs Y, Y_h, Y_c. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape = [batch_size, num_directions, hidden_size]. Default value is name: "layout" i: 0 type: INT

Inputs

Between 3 and 8 inputs.

  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[iofc] and WB[iofc] (if bidirectional) along dimension 0. The tensor has shape [num_directions, 4*hidden_size, input_size].
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[iofc] and RB[iofc] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 4*hidden_size, hidden_size].
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wb[iofc], Rb[iofc]], and [WBb[iofc], RBb[iofc]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 8*hidden_size]. Optional: If not specified - assumed to be 0.
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].
  • initial_c (optional, heterogeneous)T: Optional initial value of the cell. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].
  • P (optional, heterogeneous)T: The weight tensor for peepholes. Concatenation of P[iof] and PB[iof] (if bidirectional) along dimension 0. It has shape [num_directions, 3*hidden_size]. Optional: If not specified - assumed to be 0.

Outputs

Between 0 and 3 outputs.

  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].
  • Y_c (optional, heterogeneous)T: The last output value of the cell. It has shape [num_directions, batch_size, hidden_size].

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
  • T1 tensor(int32): Constrain seq_lens to integer tensor.
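The weight shapes above determine how an LSTM node is assembled. Below is a rough sketch (not taken from the library's examples) that builds a single-direction LSTM with this class and random weights; the sizes, names and the final onnxruntime call are assumptions for illustration:

    import numpy as np
    from skl2onnx.algebra.onnx_ops import OnnxLSTM_14

    seq_length, batch_size, input_size, hidden_size = 5, 2, 3, 4
    X = np.random.randn(seq_length, batch_size, input_size).astype(np.float32)
    W = np.random.randn(1, 4 * hidden_size, input_size).astype(np.float32)   # num_directions = 1
    R = np.random.randn(1, 4 * hidden_size, hidden_size).astype(np.float32)

    node = OnnxLSTM_14('X', W, R, hidden_size=hidden_size,
                       output_names=['Y', 'Y_h', 'Y_c'], op_version=14)
    onx = node.to_onnx({'X': X})
    # e.g. with onnxruntime:
    # from onnxruntime import InferenceSession
    # Y, Y_h, Y_c = InferenceSession(onx.SerializeToString()).run(None, {'X': X})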

OnnxLSTM_7#

class skl2onnx.algebra.onnx_ops.OnnxLSTM_7(*args, **kwargs)#

Version

Onnx name: LSTM

This version of the operator has been available since version 7.

Summary

Computes a one-layer LSTM. This operator is usually supported via some custom implementation such as CuDNN.

Notations:

  • X - input tensor
  • i - input gate
  • o - output gate
  • f - forget gate
  • c - cell gate
  • t - time step (t-1 means previous time step)
  • W[iofc] - W parameter weight matrix for input, output, forget, and cell gates
  • R[iofc] - R recurrence weight matrix for input, output, forget, and cell gates
  • Wb[iofc] - W bias vectors for input, output, forget, and cell gates
  • Rb[iofc] - R bias vectors for input, output, forget, and cell gates
  • P[iof] - P peephole weight vector for input, output, and forget gates
  • WB[iofc] - W parameter weight matrix for backward input, output, forget, and cell gates
  • RB[iofc] - R recurrence weight matrix for backward input, output, forget, and cell gates
  • WBb[iofc] - W bias vectors for backward input, output, forget, and cell gates
  • RBb[iofc] - R bias vectors for backward input, output, forget, and cell gates
  • PB[iof] - P peephole weight vector for backward input, output, and forget gates
  • H - Hidden state
  • num_directions - 2 if direction == bidirectional else 1

Activation functions:

  • Relu(x) - max(0, x)
  • Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})
  • Sigmoid(x) - 1/(1 + e^{-x})

(NOTE: Below are optional)

  • Affine(x) - alpha*x + beta
  • LeakyRelu(x) - x if x >= 0 else alpha * x
  • ThresholdedRelu(x) - x if x >= alpha else 0
  • ScaledTanh(x) - alpha*Tanh(beta*x)
  • HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)
  • Elu(x) - x if x >= 0 else alpha*(e^x - 1)
  • Softsign(x) - x/(1 + |x|)
  • Softplus(x) - log(1 + e^x)

Equations (Default: f=Sigmoid, g=Tanh, h=Tanh):

  • it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)
  • ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)
  • ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)
  • Ct = ft (.) Ct-1 + it (.) ct
  • ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)
  • Ht = ot (.) h(Ct)

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

Attributes

  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is name: "direction" s: "forward" type: STRING
  • input_forget: Couple the input and forget gates if 1. Default value is name: "input_forget" i: 0 type: INT

Inputs

Between 3 and 8 inputs.

  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].
  • W (heterogeneous)T: The weight tensor for the gates. Concatenation of W[iofc] and WB[iofc] (if bidirectional) along dimension 0. The tensor has shape [num_directions, 4*hidden_size, input_size].
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of R[iofc] and RB[iofc] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 4*hidden_size, hidden_size].
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wb[iofc], Rb[iofc]], and [WBb[iofc], RBb[iofc]] (if bidirectional) along dimension 0. This tensor has shape [num_directions, 8*hidden_size]. Optional: If not specified - assumed to be 0.
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].
  • initial_c (optional, heterogeneous)T: Optional initial value of the cell. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].
  • P (optional, heterogeneous)T: The weight tensor for peepholes. Concatenation of P[iof] and PB[iof] (if bidirectional) along dimension 0. It has shape [num_directions, 3*hidden_size]. Optional: If not specified - assumed to be 0.

Outputs

Between 0 and 3 outputs.

  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].
  • Y_c (optional, heterogeneous)T: The last output value of the cell. It has shape [num_directions, batch_size, hidden_size].

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
  • T1 tensor(int32): Constrain seq_lens to integer tensor.

OnnxLabelEncoder#

class skl2onnx.algebra.onnx_ops.OnnxLabelEncoder(*args, **kwargs)#

Version

Onnx name: LabelEncoder

This version of the operator has been available since version 2 of domain ai.onnx.ml.

Summary

Maps each element in the input tensor to another value.

The mapping is determined by the two parallel attributes, ‘keys_*’ and ‘values_*’. The i-th value in the specified ‘keys_*’ attribute would be mapped to the i-th value in the specified ‘values_*’ attribute. It implies that the input’s element type and the element type of the specified ‘keys_*’ should be identical, while the output type is identical to the specified ‘values_*’ attribute. If an input element cannot be found in the specified ‘keys_*’ attribute, the ‘default_*’ that matches the specified ‘values_*’ attribute may be used as its output value.

Let’s consider an example which maps a string tensor to an integer tensor. Assume ‘keys_strings’ is [“Amy”, “Sally”], ‘values_int64s’ is [5, 6], and ‘default_int64’ is -1. The input [“Dori”, “Amy”, “Amy”, “Sally”, “Sally”] would be mapped to [-1, 5, 5, 6, 6].

Since this operator is a one-to-one mapping, its input and output shapes are the same. Notice that only one of ‘keys_*’/’values_*’ can be set.

For key look-up, bit-wise comparison is used so even a float NaN can be mapped to a value in the ‘values_*’ attribute.

Attributes

  • default_float: A float. Default value is name: "default_float" f: -0.0 type: FLOAT
  • default_int64: An integer. Default value is name: "default_int64" i: -1 type: INT
  • default_string: A string. Default value is name: "default_string" s: "_Unused" type: STRING

Inputs

  • X (heterogeneous)T1: Input data. It can be either tensor or scalar.

Outputs

  • Y (heterogeneous)T2: Output data.

Type Constraints

  • T1 tensor(string), tensor(int64), tensor(float): The input type is a tensor of any shape.
  • T2 tensor(string), tensor(int64), tensor(float): Output type is determined by the specified ‘values_*’ attribute.
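In plain Python terms, the mapping behaves like a dictionary lookup with a default. A small sketch of the example above:

    keys_strings = ["Amy", "Sally"]
    values_int64s = [5, 6]
    default_int64 = -1

    mapping = dict(zip(keys_strings, values_int64s))
    inputs = ["Dori", "Amy", "Amy", "Sally", "Sally"]
    outputs = [mapping.get(x, default_int64) for x in inputs]
    # outputs == [-1, 5, 5, 6, 6]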

OnnxLabelEncoder_1#

class skl2onnx.algebra.onnx_ops.OnnxLabelEncoder_1(*args, **kwargs)#

Version

Onnx name: LabelEncoder

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Converts strings to integers and vice versa.

If the string default value is set, it will convert integers to strings. If the int default value is set, it will convert strings to integers.

Each operator converts either integers to strings or strings to integers, depending on which default value attribute is provided. Only one default value attribute should be defined.

When converting from integers to strings, the string is fetched from the ‘classes_strings’ list, by simple indexing.

When converting from strings to integers, the string is looked up in the list and the index at which it is found is used as the converted value.

Attributes

  • default_int64: An integer to use when an input string value is not found in the map. One and only one of the ‘default_*’ attributes must be defined. Default value is name: "default_int64" i: -1 type: INT
  • default_string: A string to use when an input integer value is not found in the map. One and only one of the ‘default_*’ attributes must be defined. Default value is name: "default_string" s: "_Unused" type: STRING

Inputs

  • X (heterogeneous)T1: Input data.

Outputs

  • Y (heterogeneous)T2: Output data. If strings are input, the output values are integers, and vice versa.

Type Constraints

  • T1 tensor(string), tensor(int64): The input type must be a tensor of integers or strings, of any shape.
  • T2 tensor(string), tensor(int64): The output type will be a tensor of strings or integers, and will have the same shape as the input.
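A rough Python equivalent of the two directions described above, assuming a hypothetical classes_strings list and the default attributes, would be:

    classes_strings = ["cat", "dog"]   # hypothetical class list
    default_int64 = -1
    default_string = "_Unused"

    # strings -> integers: index of the string in the list
    def encode(values):
        return [classes_strings.index(v) if v in classes_strings else default_int64
                for v in values]

    # integers -> strings: simple indexing into the list
    def decode(values):
        return [classes_strings[i] if 0 <= i < len(classes_strings) else default_string
                for i in values]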

OnnxLabelEncoder_2#

class skl2onnx.algebra.onnx_ops.OnnxLabelEncoder_2(*args, **kwargs)#

Version

Onnx name: LabelEncoder

This version of the operator has been available since version 2 of domain ai.onnx.ml.

Summary

Maps each element in the input tensor to another value.

The mapping is determined by the two parallel attributes, ‘keys_*’ and ‘values_*’. The i-th value in the specified ‘keys_*’ attribute would be mapped to the i-th value in the specified ‘values_*’ attribute. It implies that the input’s element type and the element type of the specified ‘keys_*’ should be identical, while the output type is identical to the specified ‘values_*’ attribute. If an input element cannot be found in the specified ‘keys_*’ attribute, the ‘default_*’ that matches the specified ‘values_*’ attribute may be used as its output value.

Let’s consider an example which maps a string tensor to an integer tensor. Assume ‘keys_strings’ is [“Amy”, “Sally”], ‘values_int64s’ is [5, 6], and ‘default_int64’ is -1. The input [“Dori”, “Amy”, “Amy”, “Sally”, “Sally”] would be mapped to [-1, 5, 5, 6, 6].

Since this operator is a one-to-one mapping, its input and output shapes are the same. Notice that only one of ‘keys_*’/’values_*’ can be set.

For key look-up, bit-wise comparison is used so even a float NaN can be mapped to a value in the ‘values_*’ attribute.

Attributes

  • default_float: A float. Default value is name: "default_float" f: -0.0 type: FLOAT
  • default_int64: An integer. Default value is name: "default_int64" i: -1 type: INT
  • default_string: A string. Default value is name: "default_string" s: "_Unused" type: STRING

Inputs

  • X (heterogeneous)T1: Input data. It can be either tensor or scalar.

Outputs

  • Y (heterogeneous)T2: Output data.

Type Constraints

  • T1 tensor(string), tensor(int64), tensor(float): The input type is a tensor of any shape.
  • T2 tensor(string), tensor(int64), tensor(float): Output type is determined by the specified ‘values_*’ attribute.

OnnxLayerNormalization#

class skl2onnx.algebra.onnx_ops.OnnxLayerNormalization(*args, **kwargs)#

Version

Onnx name: LayerNormalization

This version of the operator has been available since version 17.

Summary

This is layer normalization defined in ONNX as function. The overall computation can be split into two stages. The first stage is standardization, which makes the normalized elements have zero mean and unit variances. The computation required by standardization can be described by the following equations.

    Mean = ReduceMean<axes=normalized_axes>(X)
    D = Sub(X, Mean)
    DD = Mul(D, D)
    Var = ReduceMean<axes=normalized_axes>(DD)
    VarEps = Add(Var, epsilon)
    StdDev = Sqrt(VarEps)
    InvStdDev = Reciprocal(StdDev)
    Normalized = Mul(D, InvStdDev)

where normalized_axes is [axis, …, rank of X - 1]. The variables Var and StdDev stand for variance and standard deviation, respectively. The second output is Mean and the last one is InvStdDev. Depending on the stash_type attribute, the actual computation must happen in different floating-point precision. For example, if stash_type is 1, this operator casts all input variables to 32-bit float, performs the computation, and finally casts Normalized back to the original type of X. The second stage then scales and shifts the outcome of the first stage using

    NormalizedScaled = Mul(Normalized, Scale)
    Y = Add(NormalizedScaled, B)

The second stage doesn’t depend on stash_type. All equations are in [this syntax](https://github.com/onnx/onnx/blob/main/docs/Syntax.md). The same variable (i.e., input, output, and attribute) uses the same name in the equations above and this operator’s definition. Let d[i] indicate the i-th dimension of X. If X’s shape is [d[0], …, d[axis-1], d[axis], …, d[rank-1]], the shape of Mean and InvStdDev is [d[0], …, d[axis-1], 1, …, 1]. Y and X have the same shape.

Attributes

  • axis: The first normalization dimension. If rank(X) is r, axis’ allowed range is [-r, r]. Negative value means counting dimensions from the back. Default value is name: "axis" i: -1 type: INT
  • epsilon: The epsilon value to use to avoid division by zero. Default value is name: "epsilon" f: 9.999999747378752e-06 type: FLOAT
  • stash_type: Type of Mean and InvStdDev. This also specifies stage one’s computation precision. Default value is name: "stash_type" i: 1 type: INT

Inputs

Between 2 and 3 inputs.

  • X (heterogeneous)T: Tensor to be normalized.
  • Scale (heterogeneous)T: Scale tensor.
  • B (optional, heterogeneous)T: Bias tensor.

Outputs

Between 1 and 3 outputs.

  • Y (heterogeneous)T: Normalized tensor.
  • Mean (optional, heterogeneous)U: Saved mean used during training to speed up gradient computation.
  • InvStdDev (optional, heterogeneous)U: Saved inverse standard deviation used during training to speed up gradient computation.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types and output Y type to float tensors.
  • U tensor(float), tensor(bfloat16): Type of Mean and InvStdDev tensors.
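The two stages translate directly to NumPy. A minimal sketch for the default axis=-1, ignoring stash_type (the names X, scale and bias are illustrative):

    import numpy as np

    def layer_norm(X, scale, bias, epsilon=1e-5, axis=-1):
        # Stage 1: standardization over the normalized axes.
        mean = X.mean(axis=axis, keepdims=True)
        var = ((X - mean) ** 2).mean(axis=axis, keepdims=True)
        inv_std_dev = 1.0 / np.sqrt(var + epsilon)
        normalized = (X - mean) * inv_std_dev
        # Stage 2: scale and shift.
        return normalized * scale + bias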

OnnxLayerNormalization_17#

class skl2onnx.algebra.onnx_ops.OnnxLayerNormalization_17(*args, **kwargs)#

Version

Onnx name: LayerNormalization

This version of the operator has been available since version 17.

Summary

This is layer normalization defined in ONNX as function. The overall computation can be split into two stages. The first stage is standardization, which makes the normalized elements have zero mean and unit variances. The computation required by standardization can be described by the following equations.

    Mean = ReduceMean<axes=normalized_axes>(X)
    D = Sub(X, Mean)
    DD = Mul(D, D)
    Var = ReduceMean<axes=normalized_axes>(DD)
    VarEps = Add(Var, epsilon)
    StdDev = Sqrt(VarEps)
    InvStdDev = Reciprocal(StdDev)
    Normalized = Mul(D, InvStdDev)

where normalized_axes is [axis, …, rank of X - 1]. The variables Var and StdDev stand for variance and standard deviation, respectively. The second output is Mean and the last one is InvStdDev. Depending on the stash_type attribute, the actual computation must happen in different floating-point precision. For example, if stash_type is 1, this operator casts all input variables to 32-bit float, performs the computation, and finally casts Normalized back to the original type of X. The second stage then scales and shifts the outcome of the first stage using

    NormalizedScaled = Mul(Normalized, Scale)
    Y = Add(NormalizedScaled, B)

The second stage doesn’t depend on stash_type. All equations are in [this syntax](https://github.com/onnx/onnx/blob/main/docs/Syntax.md). The same variable (i.e., input, output, and attribute) uses the same name in the equations above and this operator’s definition. Let d[i] indicate the i-th dimension of X. If X’s shape is [d[0], …, d[axis-1], d[axis], …, d[rank-1]], the shape of Mean and InvStdDev is [d[0], …, d[axis-1], 1, …, 1]. Y and X have the same shape.

Attributes

  • axis: The first normalization dimension. If rank(X) is r, axis’ allowed range is [-r, r]. Negative value means counting dimensions from the back. Default value is name: "axis" i: -1 type: INT
  • epsilon: The epsilon value to use to avoid division by zero. Default value is name: "epsilon" f: 9.999999747378752e-06 type: FLOAT
  • stash_type: Type of Mean and InvStdDev. This also specifies stage one’s computation precision. Default value is name: "stash_type" i: 1 type: INT

Inputs

Between 2 and 3 inputs.

  • X (heterogeneous)T: Tensor to be normalized.
  • Scale (heterogeneous)T: Scale tensor.
  • B (optional, heterogeneous)T: Bias tensor.

Outputs

Between 1 and 3 outputs.

  • Y (heterogeneous)T: Normalized tensor.
  • Mean (optional, heterogeneous)U: Saved mean used during training to speed up gradient computation.
  • InvStdDev (optional, heterogeneous)U: Saved inverse standard deviation used during training to speed up gradient computation.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types and output Y type to float tensors.
  • U tensor(float), tensor(bfloat16): Type of Mean and InvStdDev tensors.

OnnxLeakyRelu#

class skl2onnx.algebra.onnx_ops.OnnxLeakyRelu(*args, **kwargs)#

Version

Onnx name: LeakyRelu

This version of the operator has been available since version 16.

Summary

LeakyRelu takes input data (Tensor<T>) and an argument alpha, and produces one output data (Tensor<T>) where the function f(x) = alpha * x for x < 0, f(x) = x for x >= 0, is applied to the data tensor elementwise.

Attributes

  • alpha: Coefficient of leakage. Default value is name: "alpha" f: 0.009999999776482582 type: FLOAT

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(bfloat16), tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.
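Like the other Onnx* classes in this module, this one can be instantiated with an input name and attribute values, then turned into a model with to_onnx. A minimal sketch (the input name 'X', the alpha value and the sample data are arbitrary):

    import numpy as np
    from skl2onnx.algebra.onnx_ops import OnnxLeakyRelu

    X = np.array([[-1.0, 0.0, 2.0]], dtype=np.float32)
    node = OnnxLeakyRelu('X', alpha=0.1, op_version=16, output_names=['Y'])
    onx = node.to_onnx({'X': X})
    # Applying f(x) = alpha*x for x < 0 and f(x) = x for x >= 0 gives [[-0.1, 0.0, 2.0]].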

OnnxLeakyRelu_1#

class skl2onnx.algebra.onnx_ops.OnnxLeakyRelu_1(*args, **kwargs)#

Version

Onnx name: LeakyRelu

This version of the operator has been available since version 1.

Summary

LeakyRelu takes input data (Tensor<T>) and an argument alpha, and produces one output data (Tensor<T>) where the function f(x) = alpha * x for x < 0, f(x) = x for x >= 0, is applied to the data tensor elementwise.

Attributes

  • alpha: Coefficient of leakage, defaults to 0.01. Default value is name: "alpha" f: 0.009999999776482582 type: FLOAT

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLeakyRelu_16#

class skl2onnx.algebra.onnx_ops.OnnxLeakyRelu_16(*args, **kwargs)#

Version

Onnx name: LeakyRelu

This version of the operator has been available since version 16.

Summary

LeakyRelu takes input data (Tensor<T>) and an argument alpha, and produces one output data (Tensor<T>) where the function f(x) = alpha * x for x < 0, f(x) = x for x >= 0, is applied to the data tensor elementwise.

Attributes

  • alpha: Coefficient of leakage. Default value is name: "alpha" f: 0.009999999776482582 type: FLOAT

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(bfloat16), tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLeakyRelu_6#

class skl2onnx.algebra.onnx_ops.OnnxLeakyRelu_6(*args, **kwargs)#

Version

Onnx name: LeakyRelu

This version of the operator has been available since version 6.

Summary

LeakyRelu takes input data (Tensor<T>) and an argument alpha, and produces one output data (Tensor<T>) where the function f(x) = alpha * x for x < 0, f(x) = x for x >= 0, is applied to the data tensor elementwise.

Attributes

  • alpha: Coefficient of leakage. Default value is name: "alpha" f: 0.009999999776482582 type: FLOAT

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLess#

class skl2onnx.algebra.onnx_ops.OnnxLess(*args, **kwargs)#

Version

Onnx name: Less

This version of the operator has been available since version 13.

Summary

Returns the tensor resulting from performing the less logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous)T: First input operand for the logical operator.
  • B (heterogeneous)T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous)T1: Result tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to all numeric tensors.
  • T1 tensor(bool): Constrain output to boolean tensor.
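Numpy-style broadcasting means the two operands only need compatible shapes, not identical ones. For example:

    import numpy as np

    A = np.array([[1.0, 5.0, 3.0]], dtype=np.float32)   # shape (1, 3)
    B = np.array([[2.0], [4.0]], dtype=np.float32)      # shape (2, 1)
    C = A < B                                           # broadcast to shape (2, 3), dtype bool
    # [[ True, False, False],
    #  [ True, False,  True]]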

OnnxLessOrEqual#

class skl2onnx.algebra.onnx_ops.OnnxLessOrEqual(*args, **kwargs)#

Version

Onnx name: LessOrEqual

This version of the operator has been available since version 16.

Summary

Returns the tensor resulting from performing the less_equal logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous)T: First input operand for the logical operator.
  • B (heterogeneous)T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous)T1: Result tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to all numeric tensors.
  • T1 tensor(bool): Constrain output to boolean tensor.

OnnxLessOrEqual_12#

class skl2onnx.algebra.onnx_ops.OnnxLessOrEqual_12(*args, **kwargs)#

Version

Onnx name: LessOrEqual

This version of the operator has been available since version 12.

Summary

Returns the tensor resulting from performing the less_equal logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous)T: First input operand for the logical operator.
  • B (heterogeneous)T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous)T1: Result tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input types to all numeric tensors.
  • T1 tensor(bool): Constrain output to boolean tensor.

OnnxLessOrEqual_16#

class skl2onnx.algebra.onnx_ops.OnnxLessOrEqual_16(*args, **kwargs)#

Version

Onnx name: LessOrEqual

This version of the operator has been available since version 16.

Summary

Returns the tensor resulting from performing the less_equal logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous)T: First input operand for the logical operator.
  • B (heterogeneous)T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous)T1: Result tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to all numeric tensors.
  • T1 tensor(bool): Constrain output to boolean tensor.

OnnxLess_1#

class skl2onnx.algebra.onnx_ops.OnnxLess_1(*args, **kwargs)#

Version

Onnx name: Less

This version of the operator has been available since version 1.

Summary

Returns the tensor resulting from performing the less logical operation elementwise on the input tensors A and B.

If broadcasting is enabled, the right-hand-side argument will be broadcasted to match the shape of the left-hand-side argument. See the doc of Add for a detailed description of the broadcasting rules.

Attributes

  • broadcast: Enable broadcasting. Default value is name: "broadcast" i: 0 type: INT

Inputs

  • A (heterogeneous)T: Left input tensor for the logical operator.
  • B (heterogeneous)T: Right input tensor for the logical operator.

Outputs

  • C (heterogeneous)T1: Result tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input to float tensors.
  • T1 tensor(bool): Constrain output to boolean tensor.

OnnxLess_13#

class skl2onnx.algebra.onnx_ops.OnnxLess_13(*args, **kwargs)#

Version

Onnx name: Less

This version of the operator has been available since version 13.

Summary

Returns the tensor resulting from performing the less logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous)T: First input operand for the logical operator.
  • B (heterogeneous)T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous)T1: Result tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input types to all numeric tensors.
  • T1 tensor(bool): Constrain output to boolean tensor.

OnnxLess_7#

class skl2onnx.algebra.onnx_ops.OnnxLess_7(*args, **kwargs)#

Version

Onnx name: Less

This version of the operator has been available since version 7.

Summary

Returns the tensor resulting from performing the less logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous)T: First input operand for the logical operator.
  • B (heterogeneous)T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous)T1: Result tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input to float tensors.
  • T1 tensor(bool): Constrain output to boolean tensor.

OnnxLess_9#

class skl2onnx.algebra.onnx_ops.OnnxLess_9(*args, **kwargs)#

Version

Onnx name: Less

This version of the operator has been available since version 9.

Summary

Returns the tensor resulting from performing the less logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • A (heterogeneous)T: First input operand for the logical operator.
  • B (heterogeneous)T: Second input operand for the logical operator.

Outputs

  • C (heterogeneous)T1: Result tensor.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input types to all numeric tensors.
  • T1 tensor(bool): Constrain output to boolean tensor.

OnnxLinearClassifier#

class skl2onnx.algebra.onnx_ops.OnnxLinearClassifier(*args, **kwargs)#

Version

Onnx name: LinearClassifier

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Linear classifier

Attributes

  • multi_class: Indicates whether to do OvR or multinomial (0=OvR is the default). Default value is name: "multi_class" i: 0 type: INT
  • post_transform: Indicates the transform to apply to the scores vector. One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’. Default value is name: "post_transform" s: "NONE" type: STRING

Inputs

  • X (heterogeneous)T1: Data to be classified.

Outputs

  • Y (heterogeneous)T2: Classification outputs (one class per example).
  • Z (heterogeneous)tensor(float): Classification scores ([N,E] - one score for each class and example).

Type Constraints

  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type, and of shape [N,C] or [C]. In the latter case, it will be treated as [1,C].
  • T2 tensor(string), tensor(int64): The output will be a tensor of strings or integers.
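Ignoring post_transform, the scores are an affine map of the features followed by a class decision. A rough NumPy sketch, assuming the coefficients are laid out one row per class:

    import numpy as np

    X = np.array([[1.0, 2.0], [3.0, 4.0]])              # [N, C] features
    coefficients = np.array([[0.5, -0.5], [0.1, 0.2]])  # [E, C], one row per class
    intercepts = np.array([0.0, -1.0])                  # [E]

    Z = X @ coefficients.T + intercepts                 # [N, E] scores (output Z)
    Y = Z.argmax(axis=1)                                # predicted class per example (output Y)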

OnnxLinearClassifier_1#

class skl2onnx.algebra.onnx_ops.OnnxLinearClassifier_1(*args, **kwargs)#

Version

Onnx name: LinearClassifier

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Linear classifier

Attributes

  • multi_class: Indicates whether to do OvR or multinomial (0=OvR is the default). Default value is name: "multi_class" i: 0 type: INT
  • post_transform: Indicates the transform to apply to the scores vector. One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’. Default value is name: "post_transform" s: "NONE" type: STRING

Inputs

  • X (heterogeneous)T1: Data to be classified.

Outputs

  • Y (heterogeneous)T2: Classification outputs (one class per example).
  • Z (heterogeneous)tensor(float): Classification scores ([N,E] - one score for each class and example).

Type Constraints

  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type, and of shape [N,C] or [C]. In the latter case, it will be treated as [1,C].
  • T2 tensor(string), tensor(int64): The output will be a tensor of strings or integers.

OnnxLinearRegressor#

class skl2onnx.algebra.onnx_ops.OnnxLinearRegressor(*args, **kwargs)#

Version

Onnx name: LinearRegressor

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Generalized linear regression evaluation.

If targets is set to 1 (default) then univariate regression is performed.

If targets is set to M then M sets of coefficients must be passed in as a sequence and M results will be output for each input n in N.

The coefficients array is of length n, and the coefficients for each target are contiguous. Intercepts are optional but if provided must match the number of targets.

Attributes

  • post_transform: Indicates the transform to apply to the regression output vector. One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’. Default value is name: "post_transform" s: "NONE" type: STRING
  • targets: The total number of regression targets, 1 if not defined. Default value is name: "targets" i: 1 type: INT

Inputs

  • X (heterogeneous)T: Data to be regressed.

Outputs

  • Y (heterogeneous)tensor(float): Regression outputs (one per target, per example).

Type Constraints

  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type.
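Because the coefficients for each target are contiguous, the flat coefficients attribute can be reshaped to [targets, C]. A small sketch with arbitrary values:

    import numpy as np

    X = np.array([[1.0, 2.0, 3.0]])            # [N, C]
    targets = 2
    coefficients = np.array([0.1, 0.2, 0.3,    # target 0
                             1.0, 0.0, -1.0])  # target 1 (contiguous per target)
    intercepts = np.array([0.5, -0.5])

    Y = X @ coefficients.reshape(targets, -1).T + intercepts   # [N, targets]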

OnnxLinearRegressor_1#

class skl2onnx.algebra.onnx_ops.OnnxLinearRegressor_1(*args, **kwargs)#

Version

Onnx name: LinearRegressor

This version of the operator has been available since version 1 of domain ai.onnx.ml.

Summary

Generalized linear regression evaluation.

If targets is set to 1 (default) then univariate regression is performed.

If targets is set to M then M sets of coefficients must be passed in as a sequence and M results will be output for each input n in N.

The coefficients array is of length n, and the coefficients for each target are contiguous. Intercepts are optional but if provided must match the number of targets.

Attributes

  • post_transform: Indicates the transform to apply to the regression output vector. One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’. Default value is name: "post_transform" s: "NONE" type: STRING
  • targets: The total number of regression targets, 1 if not defined. Default value is name: "targets" i: 1 type: INT

Inputs

  • X (heterogeneous)T: Data to be regressed.

Outputs

  • Y (heterogeneous)tensor(float): Regression outputs (one per target, per example).

Type Constraints

  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type.

OnnxLog#

class skl2onnx.algebra.onnx_ops.OnnxLog(*args, **kwargs)#

Version

Onnx name: Log

This version of the operator has been available since version 13.

Summary

Calculates the natural log of the given input tensor, element-wise.

Inputs

  • input (heterogeneous)T: Input tensor

Outputs

  • output (heterogeneous)T: The natural log of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

OnnxLogSoftmax#

class skl2onnx.algebra.onnx_ops.OnnxLogSoftmax(*args, **kwargs)#

Version

Onnx name: LogSoftmax

This version of the operator has been available since version 13.

Summary

The operator computes the log of softmax values for the given input:

    LogSoftmax(input, axis) = Log(Softmax(input, axis=axis))

The “axis” attribute indicates the dimension along which LogSoftmax will be performed. The output tensor has the same shape and contains the LogSoftmax values of the corresponding input.

Attributes

  • axis: Describes the dimension LogSoftmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: -1 type: INT

Inputs

  • input (heterogeneous)T: The input tensor of rank >= axis.

Outputs

  • output (heterogeneous)T: The output values with the same shape as the input tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.
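A numerically stable NumPy equivalent of Log(Softmax(input, axis=axis)) subtracts the maximum before exponentiating:

    import numpy as np

    def log_softmax(x, axis=-1):
        x_max = x.max(axis=axis, keepdims=True)
        shifted = x - x_max
        return shifted - np.log(np.exp(shifted).sum(axis=axis, keepdims=True))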

OnnxLogSoftmax_1#

class skl2onnx.algebra.onnx_ops.OnnxLogSoftmax_1(*args, **kwargs)#

Version

Onnx name: LogSoftmax

This version of the operator has been available since version 1.

Summary

The operator computes the logsoftmax (log of softmax) values for each layer in the batch of the given input. The input is a 2-D tensor (Tensor<float>) of size (batch_size x input_feature_dimensions). The output tensor has the same shape and contains the logsoftmax values of the corresponding input.

Input does not need to explicitly be a 2D vector; rather, it will be coerced into one. For an arbitrary n-dimensional tensor input in [a_0, a_1, …, a_{k-1}, a_k, …, a_{n-1}] and k is the axis provided, then input will be coerced into a 2-dimensional tensor with dimensions [a_0 * … * a_{k-1}, a_k * … * a_{n-1}]. For the default case where axis=1, this means the input tensor will be coerced into a 2D tensor of dimensions [a_0, a_1 * … * a_{n-1}], where a_0 is often the batch size. In this situation, we must have a_0 = N and a_1 * … * a_{n-1} = D. Each of these dimensions must be matched correctly, or else the operator will throw errors.

Attributes

  • axis: Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Default value is name: "axis" i: 1 type: INT

Inputs

  • input (heterogeneous)T: The input tensor that’s coerced into a 2D matrix of size (NxD) as described above.

Outputs

  • output (heterogeneous)T: The output values with the same shape as input tensor (the original size without coercion).

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLogSoftmax_11#

class skl2onnx.algebra.onnx_ops.OnnxLogSoftmax_11(*args, **kwargs)#

Version

Onnx name: LogSoftmax

This version of the operator has been available since version 11.

Summary

The operator computes the logsoftmax (log of softmax) values for each layer in the batch of the given input.

The input does not need to explicitly be a 2D vector; rather, it will be coerced into one. For an arbitrary n-dimensional tensor input in [a_0, a_1, …, a_{k-1}, a_k, …, a_{n-1}] and k is the axis provided, then input will be coerced into a 2-dimensional tensor with dimensions [a_0 * … * a_{k-1}, a_k * … * a_{n-1}]. For the default case where axis=1, this means the input tensor will be coerced into a 2D tensor of dimensions [a_0, a_1 * … * a_{n-1}], where a_0 is often the batch size. In this situation, we must have a_0 = N and a_1 * … * a_{n-1} = D. Each of these dimensions must be matched correctly, or else the operator will throw errors. The output tensor has the same shape and contains the logsoftmax values of the corresponding input.

Attributes

  • axis: Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: 1 type: INT

Inputs

  • input (heterogeneous)T: The input tensor that’s coerced into a 2D matrix of size (NxD) as described above.

Outputs

  • output (heterogeneous)T: The output values with the same shape as input tensor (the original size without coercion).

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLogSoftmax_13#

class skl2onnx.algebra.onnx_ops.OnnxLogSoftmax_13(*args, **kwargs)#

Version

Onnx name: LogSoftmax

This version of the operator has been available since version 13.

Summary

The operator computes the log of softmax values for the given input:

    LogSoftmax(input, axis) = Log(Softmax(input, axis=axis))

The “axis” attribute indicates the dimension along which LogSoftmax will be performed. The output tensor has the same shape and contains the LogSoftmax values of the corresponding input.

Attributes

  • axis: Describes the dimension LogSoftmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: -1 type: INT

Inputs

  • input (heterogeneous)T: The input tensor of rank >= axis.

Outputs

  • output (heterogeneous)T: The output values with the same shape as the input tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

OnnxLog_1#

class skl2onnx.algebra.onnx_ops.OnnxLog_1(*args, **kwargs)#

Version

Onnx name: Log

This version of the operator has been available since version 1.

Summary

Calculates the natural log of the given input tensor, element-wise.

Attributes

Inputs

  • input (heterogeneous)T: Input tensor

Outputs

  • output (heterogeneous)T: The natural log of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLog_13#

class skl2onnx.algebra.onnx_ops.OnnxLog_13(*args, **kwargs)#

Version

Onnx name: Log

This version of the operator has been available since version 13.

Summary

Calculates the natural log of the given input tensor, element-wise.

Inputs

  • input (heterogeneous)T: Input tensor

Outputs

  • output (heterogeneous)T: The natural log of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

OnnxLog_6#

class skl2onnx.algebra.onnx_ops.OnnxLog_6(*args, **kwargs)#

Version

Onnx name: Log

This version of the operator has been available since version 6.

Summary

Calculates the natural log of the given input tensor, element-wise.

Inputs

  • input (heterogeneous)T: Input tensor

Outputs

  • output (heterogeneous)T: The natural log of the input tensor computed element-wise

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxLoop#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLoop(*args, **kwargs)#
+

Version

+

Onnx name: Loop

+

This version of the operator has been available since +version 16.

+

Summary

+

Generic Looping construct. This loop has multiple termination conditions:

+
    +
  1. Trip count. Iteration count specified at runtime. Set by +specifying the input M. Optional. Set to empty string to omit. +Note that a static trip count (specified at graph construction time) can be +specified by passing in a constant node for input M.

  2. +
  3. Loop termination condition. This is an input to the op that determines +whether to run the first iteration and also a loop-carried dependency for +the body graph. The body graph must yield a value for the condition variable, +whether this input is provided or not.

  4. +
+

This table summarizes the operating modes of this operator with equivalent +C-style code:

+

Operator inputs defined as (max_trip_count, condition_var).

+
    +
  • +
    input (“”, “”):
    +
    for (int i=0; ; ++i) {

    cond = … // Note this value is ignored, but is required in the body

    +
    +
    +

    }

    +
    +
    +
  • +
  • +
    input (“”, cond) // Note this is analogous to a while loop

    bool cond = …; +for (int i=0; cond; ++i) {

    +
    +

    cond = …;

    +
    +

    }

    +
    +
    +
  • +
  • +
    input (“”, 1) // Note this is analogous to a do-while loop

    bool cond = true +for (int i=0; cond; ++i) {

    +
    +

    cond = …;

    +
    +

    }

    +
    +
    +
  • +
  • +
    input (trip_count, “”) // Note this is analogous to a for loop

    int trip_count = … +for (int i=0; i < trip_count; ++i) {

    +
    +

    cond = …; // ignored

    +
    +

    }

    +
    +
    +
  • +
  • +
    input (trip_count, cond)

    int trip_count = …; +bool cond = …; +for (int i=0; i < trip_count && cond; ++i) {

    +
    +

    cond = …;

    +
    +

    }

    +
    +
    +
  • +
+

Sample usage - cond as well as trip count

+
+
+
graph predict-net {

%a = Constant[value = <Scalar Tensor [3]>]() +%b = Constant[value = <Scalar Tensor [6]>]() +%keepgoing = Constant[value = <Scalar Tensor [1]>]() +%max_trip_count = Constant[value = <Scalar Tensor [10]>]() +%keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b) +return

+
+
+

}

+
+
graph body-net (

%i[INT32, scalar] // iteration number +%keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used +%b_in[INT32, scalar] // incoming value of loop-carried-dependency b

+
+
) {

%my_local = Add(%a, %b_in) +%b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b +%keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition +%user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated +return %keepgoing_out, %b_out, %user_defined_val

+
+
+

}

+
+

Sample equivalent C code

+
+
+
{

/* User-defined code (enclosing scope) / +int a = 3, b = 6; +bool keepgoing = true; // Analogous to input cond +/ End user-defined code */

+

/* Implicitly-defined code / +const int max_trip_count = 10; // Analogous to input M +int user_defined_vals[]; // Imagine this is resizable +/ End implicitly-defined code / +/ initialize loop-carried variables and scan-output variables */ +bool keepgoing_out = keepgoing +int b_out = b

+
+
for (int i=0; i < max_trip_count && keepgoing_out; ++i) {
+
/* Implicitly-defined code: bind actual parameter values

to formal parameter variables of loop-body */

+
+
+

bool keepgoing_in = keepgoing_out; +bool b_in = b_out;

+

/* User-defined code (loop body) / +int my_local = a + b_in; // Reading value “a” from the enclosing scope is fine +b_out = a - b_in; +keepgoing_out = my_local > b_out; +user_defined_val = b_in + b_in; // b_in and b_out are different variables +/ End user-defined code */

+

/* Implicitly defined-code */ +user_defined_vals[i] = user_defined_val // accumulate scan-output values

+
+
+

} +// int t = my_local; // Can’t do this. my_local is not accessible here.

+

// The values below are bound to the output variables of the loop and therefore accessible +// b_out; user_defined_vals; keepgoing_out;

+
+
+

}

+
+

There are several things of note in this code snippet:

+
    +
  1. Values from the enclosing scope (i.e. variable “a” here) are in scope and can +be referenced in the inputs of the loop.

  2. +
  3. Any values computed in the loop body that needs to be used in a subsequent +iteration or after the loop are modelled using a pair of variables in the loop-body, +consisting of an input variable (eg., b_in) and an output variable (eg., b_out). +These are referred to as loop-carried dependences. The loop operation node +supplies the input value of the input variable for the first iteration, and +returns the output value of the output variable produced by the final +iteration.

  4. +
  5. Scan_output variables are used to implicitly concatenate values computed across +all the iterations. In the above example, the value of user_defined_val computed +over all iterations are concatenated and returned as the value of user_defined_vals +after the loop.

  6. +
  7. Values created in the body cannot be accessed in the enclosing scope, +except using the mechanism described above.

  8. +
+

Note that the semantics of this op support “diagonal” or “wavefront” execution. +(See Step 3 here for an example: +https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). +Frontends should emit multi-layer RNNs as a series of While operators (with +time being the inner looping dimension), with each successive layer consuming +the scan_outputs from the previous layer, possibly going through several +point-wise operators (e.g. dropout, residual connections, linear layer).


Input/output matching for the subgraph (run by the loop node) is based on order instead of name; the implementation figures out the names from this order.
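As a cross-check of the semantics above, the following is a minimal, illustrative Python sketch of how a runtime could drive a Loop body: a trip count and/or a condition control iteration, loop-carried values are threaded from one iteration to the next, and per-iteration scan values are accumulated. The run_loop helper and the body signature are hypothetical illustrations, not the onnxruntime implementation.

    def run_loop(body, max_trip_count=None, cond=True, v_initial=()):
        """Sketch of Loop semantics: optional trip count, optional condition,
        loop-carried state, and scan outputs accumulated across iterations."""
        state = list(v_initial)     # loop-carried dependencies
        scan_outputs = []           # simplified to a single scan output
        i = 0
        while (max_trip_count is None or i < max_trip_count) and cond:
            # body returns (next condition, next loop-carried values..., scan value)
            cond, *state, scan_value = body(i, cond, *state)
            scan_outputs.append(scan_value)
            i += 1
        return (*state, scan_outputs)   # final carried values, then scan outputs

    # Body equivalent to the body-net above: b is loop-carried, a comes from the
    # enclosing scope, and b_in + b_in is the scan-output value of each iteration.
    a = 3
    def body(i, keepgoing_in, b_in):
        my_local = a + b_in
        b_out = a - b_in
        keepgoing_out = my_local > b_out
        user_defined_val = b_in + b_in
        return keepgoing_out, b_out, user_defined_val

    b_final, user_defined_vals = run_loop(body, max_trip_count=10, cond=True, v_initial=(6,))
    # b_final == 6, user_defined_vals == [12, -6]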

+

Attributes


Inputs

Between 2 and 2147483647 inputs.

  • M (optional, heterogeneous)I: A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip.

  • cond (optional, heterogeneous)B: A boolean termination condition. Optional. Pass empty string to skip.

  • v_initial (variadic)V: The initial values of any loop-carried dependencies (values that change across loop iterations)

Outputs

Between 1 and 2147483647 outputs.

  • v_final_and_scan_outputs (variadic)V: Final N loop carried dependency values then K scan_outputs. Scan outputs must be Tensors.

Type Constraints

  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(bfloat16)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)), optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(bfloat16))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(bfloat16)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types

  • I tensor(int64): tensor of int64, which should be a scalar.

  • B tensor(bool): tensor of bool, which should be a scalar.


OnnxLoop_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLoop_1(*args, **kwargs)#
+

Version

+

Onnx name: Loop

+

This version of the operator has been available since +version 1.

+

Summary

+

Generic Looping construct. This loop has multiple termination conditions:

  1. Trip count. Iteration count specified at runtime. Set by
     specifying the input M. Optional. Set to empty string to omit.
     Note that a static trip count (specified at graph construction time) can be
     specified by passing in a constant node for input M.

  2. Loop termination condition. This is an input to the op that determines
     whether to run the first iteration and also a loop-carried dependency for
     the body graph. The body graph must yield a value for the condition variable,
     whether this input is provided or not.

This table summarizes the operating modes of this operator with equivalent
C-style code:

Operator inputs defined as (max_trip_count, condition_var).

  input (“”, “”):
      for (int i=0; ; ++i) {
        cond = … // Note this value is ignored, but is required in the body
      }

  input (“”, cond) // Note this is analogous to a while loop
      bool cond = …;
      for (int i=0; cond; ++i) {
        cond = …;
      }

  input (“”, 1) // Note this is analogous to a do-while loop
      bool cond = true
      for (int i=0; cond; ++i) {
        cond = …;
      }

  input (trip_count, “”) // Note this is analogous to a for loop
      int trip_count = …
      for (int i=0; i < trip_count; ++i) {
        cond = …; // ignored
      }

  input (trip_count, cond)
      int trip_count = …;
      bool cond = …;
      for (int i=0; i < trip_count && cond; ++i) {
        cond = …;
      }

Sample usage - cond as well as trip count

+
+
+
graph predict-net {
  %a = Constant[value = <Scalar Tensor [3]>]()
  %b = Constant[value = <Scalar Tensor [6]>]()
  %keepgoing = Constant[value = <Scalar Tensor [1]>]()
  %max_trip_count = Constant[value = <Scalar Tensor [10]>]()
  %keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b)
  return
}

graph body-net (
  %i[INT32, scalar]
  %keepgoing[BOOL, scalar]
  %b[INT32, scalar]
) {
  %my_local = Add(%a, %b)
  %b_out = Sub(%a, %b)
  %keepgoing_out = Greater(%my_local, %b_out)
  %user_defined_vals = Add(%b, %b)
  return %keepgoing_out, %b_out, %user_defined_vals
}

+
+

Sample equivalent C code

+
+
+
{
  /* User-defined code (enclosing scope) */
  int a = 3, b = 6;
  bool keepgoing = true; // Analogous to input cond
  /* End user-defined code */

  /* Implicitly-defined code */
  const int max_trip_count = 10; // Analogous to input M
  int user_defined_vals[];       // Imagine this is resizable
  /* End implicitly-defined code */
  for (int i = 0; i < max_trip_count && keepgoing; ++i) {
    /* User-defined code (loop body) */
    int my_local = a + b;     // Reading values in the enclosing scope is fine
    b = a - b;                // writes fine if we specify b as a loop-carried dependency
    keepgoing = my_local > b; // keepgoing is a loop-carried dependency
    user_defined_vals[i] = b + b;
    /* End user-defined code */
  }
  // my_local = 123; // Can’t do this. my_local was defined in the body

  // These below values are live-out from the loop and therefore accessible
  b_out; user_defined_vals; keepgoing_out;
}

+
+

There are several things of note in this code snippet:

  1. Values from the enclosing scope (i.e. variable a here) are in scope and can
     be referenced in the inputs of the loop.

  2. Any variables which you wish to make available in the enclosing scope (i.e.
     the variables b and keepgoing) must be declared as either loop-carried
     dependencies (both at the op inputs and output and at the body net input and
     output) or scan_outputs.

  3. Values created in the body cannot be accessed in the enclosing scope.

Note that the semantics of this op support “diagonal” or “wavefront” execution.
(See Step 3 here for an example:
https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/).
Frontends should emit multi-layer RNNs as a series of While operators (with
time being the inner looping dimension), with each successive layer consuming
the scan_outputs from the previous layer, possibly going through several
point-wise operators (e.g. dropout, residual connections, linear layer).

+

Attributes

+
    +
  • +
+

Inputs

+

Between 3 and 2147483647 inputs.

+
    +
  • M (optional, heterogeneous)I: A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip.

  • +
  • cond (optional, heterogeneous)B: A boolean termination condition. Optional. Pass empty string to skip.

  • +
  • v_initial (variadic)V: The initial values of any loop-carried dependencies (values that change across loop iterations)

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • v_final_and_scan_outputs (variadic)V: Final N loop carried dependency values then K scan_outputs

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

  • +
  • I tensor(int64): tensor of int64, which should be a scalar.

  • +
  • B tensor(bool): tensor of bool, which should be a scalar.

  • +
+
+ +
+
+
+
+

OnnxLoop_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLoop_11(*args, **kwargs)#
+

Version

+

Onnx name: Loop

+

This version of the operator has been available since +version 11.

+

Summary

+

Generic Looping construct. This loop has multiple termination conditions:

+
    +
  1. Trip count. Iteration count specified at runtime. Set by +specifying the input M. Optional. Set to empty string to omit. +Note that a static trip count (specified at graph construction time) can be +specified by passing in a constant node for input M.

  2. +
  3. Loop termination condition. This is an input to the op that determines +whether to run the first iteration and also a loop-carried dependency for +the body graph. The body graph must yield a value for the condition variable, +whether this input is provided or not.

  4. +
+

This table summarizes the operating modes of this operator with equivalent
C-style code:

Operator inputs defined as (max_trip_count, condition_var).

  input (“”, “”):
      for (int i=0; ; ++i) {
        cond = … // Note this value is ignored, but is required in the body
      }

  input (“”, cond) // Note this is analogous to a while loop
      bool cond = …;
      for (int i=0; cond; ++i) {
        cond = …;
      }

  input (“”, 1) // Note this is analogous to a do-while loop
      bool cond = true
      for (int i=0; cond; ++i) {
        cond = …;
      }

  input (trip_count, “”) // Note this is analogous to a for loop
      int trip_count = …
      for (int i=0; i < trip_count; ++i) {
        cond = …; // ignored
      }

  input (trip_count, cond)
      int trip_count = …;
      bool cond = …;
      for (int i=0; i < trip_count && cond; ++i) {
        cond = …;
      }

Sample usage - cond as well as trip count

+
+
+
graph predict-net {
  %a = Constant[value = <Scalar Tensor [3]>]()
  %b = Constant[value = <Scalar Tensor [6]>]()
  %keepgoing = Constant[value = <Scalar Tensor [1]>]()
  %max_trip_count = Constant[value = <Scalar Tensor [10]>]()
  %keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b)
  return
}

graph body-net (
  %i[INT32, scalar]            // iteration number
  %keepgoing_in[BOOL, scalar]  // incoming loop-termination-condition; not used
  %b_in[INT32, scalar]         // incoming value of loop-carried-dependency b
) {
  %my_local = Add(%a, %b_in)
  %b_out = Sub(%a, %b_in)                      // outgoing value of loop-carried-dependency b
  %keepgoing_out = Greater(%my_local, %b_out)  // outgoing loop-termination-condition
  %user_defined_val = Add(%b_in, %b_in)        // scan-output value to be accumulated
  return %keepgoing_out, %b_out, %user_defined_val
}

+
+

Sample equivalent C code

+
+
+
{
  /* User-defined code (enclosing scope) */
  int a = 3, b = 6;
  bool keepgoing = true; // Analogous to input cond
  /* End user-defined code */

  /* Implicitly-defined code */
  const int max_trip_count = 10; // Analogous to input M
  int user_defined_vals[];       // Imagine this is resizable
  /* End implicitly-defined code */
  /* Initialize loop-carried variables and scan-output variables */
  bool keepgoing_out = keepgoing;
  int b_out = b;

  for (int i = 0; i < max_trip_count && keepgoing_out; ++i) {
    /* Implicitly-defined code: bind actual parameter values
       to formal parameter variables of loop-body */
    bool keepgoing_in = keepgoing_out;
    int b_in = b_out;

    /* User-defined code (loop body) */
    int my_local = a + b_in;  // Reading value “a” from the enclosing scope is fine
    b_out = a - b_in;
    keepgoing_out = my_local > b_out;
    user_defined_val = b_in + b_in;  // b_in and b_out are different variables
    /* End user-defined code */

    /* Implicitly-defined code */
    user_defined_vals[i] = user_defined_val;  // accumulate scan-output values
  }
  // int t = my_local; // Can’t do this. my_local is not accessible here.

  // The values below are bound to the output variables of the loop and therefore accessible:
  // b_out; user_defined_vals; keepgoing_out;
}

+
+

There are several things of note in this code snippet:

  1. Values from the enclosing scope (i.e. variable “a” here) are in scope and can
     be referenced in the inputs of the loop.

  2. Any values computed in the loop body that need to be used in a subsequent
     iteration or after the loop are modelled using a pair of variables in the loop-body,
     consisting of an input variable (e.g., b_in) and an output variable (e.g., b_out).
     These are referred to as loop-carried dependences. The loop operation node
     supplies the input value of the input variable for the first iteration, and
     returns the output value of the output variable produced by the final
     iteration.

  3. Scan_output variables are used to implicitly concatenate values computed across
     all the iterations. In the above example, the value of user_defined_val computed
     over all iterations is concatenated and returned as the value of user_defined_vals
     after the loop.

  4. Values created in the body cannot be accessed in the enclosing scope,
     except using the mechanism described above.

Note that the semantics of this op support “diagonal” or “wavefront” execution. +(See Step 3 here for an example: +https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). +Frontends should emit multi-layer RNNs as a series of While operators (with +time being the inner looping dimension), with each successive layer consuming +the scan_outputs from the previous layer, possibly going through several +point-wise operators (e.g. dropout, residual connections, linear layer).

+

Attributes

+
    +
  • +
+

Inputs

+

Between 2 and 2147483647 inputs.

+
    +
  • M (optional, heterogeneous)I: A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip.

  • +
  • cond (optional, heterogeneous)B: A boolean termination condition. Optional. Pass empty string to skip.

  • +
  • v_initial (variadic)V: The initial values of any loop-carried dependencies (values that change across loop iterations)

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • v_final_and_scan_outputs (variadic)V: Final N loop carried dependency values then K scan_outputs

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

  • +
  • I tensor(int64): tensor of int64, which should be a scalar.

  • +
  • B tensor(bool): tensor of bool, which should be a scalar.

  • +
+
+ +
+
+
+
+

OnnxLoop_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLoop_13(*args, **kwargs)#
+

Version

+

Onnx name: Loop

+

This version of the operator has been available since +version 13.

+

Summary

+

Generic Looping construct. This loop has multiple termination conditions:

+
    +
  1. Trip count. Iteration count specified at runtime. Set by +specifying the input M. Optional. Set to empty string to omit. +Note that a static trip count (specified at graph construction time) can be +specified by passing in a constant node for input M.

  2. +
  3. Loop termination condition. This is an input to the op that determines +whether to run the first iteration and also a loop-carried dependency for +the body graph. The body graph must yield a value for the condition variable, +whether this input is provided or not.

  4. +
+

This table summarizes the operating modes of this operator with equivalent
C-style code:

Operator inputs defined as (max_trip_count, condition_var).

  input (“”, “”):
      for (int i=0; ; ++i) {
        cond = … // Note this value is ignored, but is required in the body
      }

  input (“”, cond) // Note this is analogous to a while loop
      bool cond = …;
      for (int i=0; cond; ++i) {
        cond = …;
      }

  input (“”, 1) // Note this is analogous to a do-while loop
      bool cond = true
      for (int i=0; cond; ++i) {
        cond = …;
      }

  input (trip_count, “”) // Note this is analogous to a for loop
      int trip_count = …
      for (int i=0; i < trip_count; ++i) {
        cond = …; // ignored
      }

  input (trip_count, cond)
      int trip_count = …;
      bool cond = …;
      for (int i=0; i < trip_count && cond; ++i) {
        cond = …;
      }

Sample usage - cond as well as trip count

+
+
+
graph predict-net {
  %a = Constant[value = <Scalar Tensor [3]>]()
  %b = Constant[value = <Scalar Tensor [6]>]()
  %keepgoing = Constant[value = <Scalar Tensor [1]>]()
  %max_trip_count = Constant[value = <Scalar Tensor [10]>]()
  %keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b)
  return
}

graph body-net (
  %i[INT32, scalar]            // iteration number
  %keepgoing_in[BOOL, scalar]  // incoming loop-termination-condition; not used
  %b_in[INT32, scalar]         // incoming value of loop-carried-dependency b
) {
  %my_local = Add(%a, %b_in)
  %b_out = Sub(%a, %b_in)                      // outgoing value of loop-carried-dependency b
  %keepgoing_out = Greater(%my_local, %b_out)  // outgoing loop-termination-condition
  %user_defined_val = Add(%b_in, %b_in)        // scan-output value to be accumulated
  return %keepgoing_out, %b_out, %user_defined_val
}

+
+

Sample equivalent C code

+
+
+
{
  /* User-defined code (enclosing scope) */
  int a = 3, b = 6;
  bool keepgoing = true; // Analogous to input cond
  /* End user-defined code */

  /* Implicitly-defined code */
  const int max_trip_count = 10; // Analogous to input M
  int user_defined_vals[];       // Imagine this is resizable
  /* End implicitly-defined code */
  /* Initialize loop-carried variables and scan-output variables */
  bool keepgoing_out = keepgoing;
  int b_out = b;

  for (int i = 0; i < max_trip_count && keepgoing_out; ++i) {
    /* Implicitly-defined code: bind actual parameter values
       to formal parameter variables of loop-body */
    bool keepgoing_in = keepgoing_out;
    int b_in = b_out;

    /* User-defined code (loop body) */
    int my_local = a + b_in;  // Reading value “a” from the enclosing scope is fine
    b_out = a - b_in;
    keepgoing_out = my_local > b_out;
    user_defined_val = b_in + b_in;  // b_in and b_out are different variables
    /* End user-defined code */

    /* Implicitly-defined code */
    user_defined_vals[i] = user_defined_val;  // accumulate scan-output values
  }
  // int t = my_local; // Can’t do this. my_local is not accessible here.

  // The values below are bound to the output variables of the loop and therefore accessible:
  // b_out; user_defined_vals; keepgoing_out;
}

+
+

There are several things of note in this code snippet:

  1. Values from the enclosing scope (i.e. variable “a” here) are in scope and can
     be referenced in the inputs of the loop.

  2. Any values computed in the loop body that need to be used in a subsequent
     iteration or after the loop are modelled using a pair of variables in the loop-body,
     consisting of an input variable (e.g., b_in) and an output variable (e.g., b_out).
     These are referred to as loop-carried dependences. The loop operation node
     supplies the input value of the input variable for the first iteration, and
     returns the output value of the output variable produced by the final
     iteration.

  3. Scan_output variables are used to implicitly concatenate values computed across
     all the iterations. In the above example, the value of user_defined_val computed
     over all iterations is concatenated and returned as the value of user_defined_vals
     after the loop.

  4. Values created in the body cannot be accessed in the enclosing scope,
     except using the mechanism described above.

Note that the semantics of this op support “diagonal” or “wavefront” execution. +(See Step 3 here for an example: +https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). +Frontends should emit multi-layer RNNs as a series of While operators (with +time being the inner looping dimension), with each successive layer consuming +the scan_outputs from the previous layer, possibly going through several +point-wise operators (e.g. dropout, residual connections, linear layer).

+

Input/output matching for the subgraph (run by the loop node) is based on order instead of name; the implementation figures out the names from this order.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 2 and 2147483647 inputs.

+
    +
  • M (optional, heterogeneous)I: A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip.

  • +
  • cond (optional, heterogeneous)B: A boolean termination condition. Optional. Pass empty string to skip.

  • +
  • v_initial (variadic)V: The initial values of any loop-carried dependencies (values that change across loop iterations)

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • v_final_and_scan_outputs (variadic)V: Final N loop carried dependency values then K scan_outputs. Scan outputs must be Tensors.

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): All Tensor and Sequence types

  • +
  • I tensor(int64): tensor of int64, which should be a scalar.

  • +
  • B tensor(bool): tensor of bool, which should be a scalar.

  • +
+
+ +
+
+
+
+

OnnxLoop_16#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLoop_16(*args, **kwargs)#
+

Version

+

Onnx name: Loop

+

This version of the operator has been available since +version 16.

+

Summary

+

Generic Looping construct. This loop has multiple termination conditions:

+
    +
  1. Trip count. Iteration count specified at runtime. Set by +specifying the input M. Optional. Set to empty string to omit. +Note that a static trip count (specified at graph construction time) can be +specified by passing in a constant node for input M.

  2. +
  3. Loop termination condition. This is an input to the op that determines +whether to run the first iteration and also a loop-carried dependency for +the body graph. The body graph must yield a value for the condition variable, +whether this input is provided or not.

  4. +
+

This table summarizes the operating modes of this operator with equivalent
C-style code:

Operator inputs defined as (max_trip_count, condition_var).

  input (“”, “”):
      for (int i=0; ; ++i) {
        cond = … // Note this value is ignored, but is required in the body
      }

  input (“”, cond) // Note this is analogous to a while loop
      bool cond = …;
      for (int i=0; cond; ++i) {
        cond = …;
      }

  input (“”, 1) // Note this is analogous to a do-while loop
      bool cond = true
      for (int i=0; cond; ++i) {
        cond = …;
      }

  input (trip_count, “”) // Note this is analogous to a for loop
      int trip_count = …
      for (int i=0; i < trip_count; ++i) {
        cond = …; // ignored
      }

  input (trip_count, cond)
      int trip_count = …;
      bool cond = …;
      for (int i=0; i < trip_count && cond; ++i) {
        cond = …;
      }

Sample usage - cond as well as trip count

+
+
+
graph predict-net {
  %a = Constant[value = <Scalar Tensor [3]>]()
  %b = Constant[value = <Scalar Tensor [6]>]()
  %keepgoing = Constant[value = <Scalar Tensor [1]>]()
  %max_trip_count = Constant[value = <Scalar Tensor [10]>]()
  %keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b)
  return
}

graph body-net (
  %i[INT32, scalar]            // iteration number
  %keepgoing_in[BOOL, scalar]  // incoming loop-termination-condition; not used
  %b_in[INT32, scalar]         // incoming value of loop-carried-dependency b
) {
  %my_local = Add(%a, %b_in)
  %b_out = Sub(%a, %b_in)                      // outgoing value of loop-carried-dependency b
  %keepgoing_out = Greater(%my_local, %b_out)  // outgoing loop-termination-condition
  %user_defined_val = Add(%b_in, %b_in)        // scan-output value to be accumulated
  return %keepgoing_out, %b_out, %user_defined_val
}

+
+

Sample equivalent C code

+
+
+
{
  /* User-defined code (enclosing scope) */
  int a = 3, b = 6;
  bool keepgoing = true; // Analogous to input cond
  /* End user-defined code */

  /* Implicitly-defined code */
  const int max_trip_count = 10; // Analogous to input M
  int user_defined_vals[];       // Imagine this is resizable
  /* End implicitly-defined code */
  /* Initialize loop-carried variables and scan-output variables */
  bool keepgoing_out = keepgoing;
  int b_out = b;

  for (int i = 0; i < max_trip_count && keepgoing_out; ++i) {
    /* Implicitly-defined code: bind actual parameter values
       to formal parameter variables of loop-body */
    bool keepgoing_in = keepgoing_out;
    int b_in = b_out;

    /* User-defined code (loop body) */
    int my_local = a + b_in;  // Reading value “a” from the enclosing scope is fine
    b_out = a - b_in;
    keepgoing_out = my_local > b_out;
    user_defined_val = b_in + b_in;  // b_in and b_out are different variables
    /* End user-defined code */

    /* Implicitly-defined code */
    user_defined_vals[i] = user_defined_val;  // accumulate scan-output values
  }
  // int t = my_local; // Can’t do this. my_local is not accessible here.

  // The values below are bound to the output variables of the loop and therefore accessible:
  // b_out; user_defined_vals; keepgoing_out;
}

+
+

There are several things of note in this code snippet:

  1. Values from the enclosing scope (i.e. variable “a” here) are in scope and can
     be referenced in the inputs of the loop.

  2. Any values computed in the loop body that need to be used in a subsequent
     iteration or after the loop are modelled using a pair of variables in the loop-body,
     consisting of an input variable (e.g., b_in) and an output variable (e.g., b_out).
     These are referred to as loop-carried dependences. The loop operation node
     supplies the input value of the input variable for the first iteration, and
     returns the output value of the output variable produced by the final
     iteration.

  3. Scan_output variables are used to implicitly concatenate values computed across
     all the iterations. In the above example, the value of user_defined_val computed
     over all iterations is concatenated and returned as the value of user_defined_vals
     after the loop.

  4. Values created in the body cannot be accessed in the enclosing scope,
     except using the mechanism described above.

Note that the semantics of this op support “diagonal” or “wavefront” execution. +(See Step 3 here for an example: +https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). +Frontends should emit multi-layer RNNs as a series of While operators (with +time being the inner looping dimension), with each successive layer consuming +the scan_outputs from the previous layer, possibly going through several +point-wise operators (e.g. dropout, residual connections, linear layer).

+

Input/output matching for the subgraph (run by the loop node) is based on order instead of name; the implementation figures out the names from this order.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 2 and 2147483647 inputs.

+
    +
  • M (optional, heterogeneous)I: A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip.

  • +
  • cond (optional, heterogeneous)B: A boolean termination condition. Optional. Pass empty string to skip.

  • +
  • v_initial (variadic)V: The initial values of any loop-carried dependencies (values that change across loop iterations)

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • v_final_and_scan_outputs (variadic)V: Final N loop carried dependency values then K scan_outputs. Scan outputs must be Tensors.

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(bfloat16)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)), optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(bfloat16))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(bfloat16)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): All Tensor, Sequence(Tensor), Optional(Tensor), and Optional(Sequence(Tensor)) types

  • +
  • I tensor(int64): tensor of int64, which should be a scalar.

  • +
  • B tensor(bool): tensor of bool, which should be a scalar.

  • +
+
+ +
+
+
+
+

OnnxLpNormalization#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLpNormalization(*args, **kwargs)#
+

Version

+

Onnx name: LpNormalization

+

This version of the operator has been available since +version 1.

+

Summary

+

Given a matrix, apply Lp-normalization along the provided axis.

+

Attributes

+
  • axis: The axis on which to apply normalization; -1 means the last axis. Default value is name: "axis" i: -1 type: INT

  • p: The order of the normalization; only 1 or 2 are supported. Default value is name: "p" i: 2 type: INT
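For reference, the effect of these attributes can be sketched with NumPy; this is an illustrative re-implementation of the formula, not the code used by any runtime:

    import numpy as np

    def lp_normalize(x, axis=-1, p=2):
        # Divide each slice along `axis` by its Lp norm, as LpNormalization does.
        norm = np.sum(np.abs(x) ** p, axis=axis, keepdims=True) ** (1.0 / p)
        return x / norm

    x = np.array([[3.0, 4.0], [1.0, 1.0]], dtype=np.float32)
    print(lp_normalize(x, axis=-1, p=2))   # each row scaled to unit L2 norm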

Inputs

+
    +
  • input (heterogeneous)T: Input matrix

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Matrix after normalization

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxLpNormalization_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLpNormalization_1(*args, **kwargs)#
+

Version

+

Onnx name: LpNormalization

+

This version of the operator has been available since +version 1.

+

Summary

+

Given a matrix, apply Lp-normalization along the provided axis.

+

Attributes

+
    +
  • axis: The axis on which to apply normalization, -1 mean last axis. Default value is +name: "axis" i: -1 type: INT

  • +
  • p: The order of the normalization, only 1 or 2 are supported. Default value is +name: "p" i: 2 type: INT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input matrix

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Matrix after normalization

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxLpPool#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLpPool(*args, **kwargs)#
+

Version

+

Onnx name: LpPool

+

This version of the operator has been available since +version 18.

+

Summary

+

LpPool consumes an input tensor X and applies Lp pooling across the tensor
according to kernel sizes, stride sizes, and pad lengths. Lp pooling consists of
computing the Lp norm on all values of a subset of the input tensor according to
the kernel size and downsampling the data into the output tensor Y for further
processing. The output spatial shape will be the following:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1)

or

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1)

if ceil_mode is enabled. pad_shape[i] is the sum of pads along axis i.

+

auto_pad is a DEPRECATED attribute. If you are still using it, the output spatial shape will be the following:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - {kernelSpatialShape} + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

And the pad shape will be the following if SAME_UPPER or SAME_LOWER:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + {kernelSpatialShape} - input_spatial_shape[i]
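To make the window arithmetic concrete, here is a small NumPy sketch of Lp pooling on a 1-D signal with no padding and floor rounding; it is an illustrative example only, and the names and defaults are assumptions rather than the runtime kernel:

    import numpy as np

    def lp_pool_1d(x, kernel, stride, p=2):
        # Output length follows the floor formula above with pad_shape = 0.
        out_len = (len(x) - kernel) // stride + 1
        return np.array([
            np.sum(np.abs(x[i * stride:i * stride + kernel]) ** p) ** (1.0 / p)
            for i in range(out_len)
        ])

    x = np.arange(1.0, 7.0)                        # [1, 2, 3, 4, 5, 6]
    print(lp_pool_1d(x, kernel=2, stride=2, p=2))  # L2 norm of each of the 3 windows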

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is +name: "ceil_mode" i: 0 type: INT

  • +
  • +
  • +
  • p: p value of the Lp norm used to pool over the input data. Default value is +name: "p" i: 2 type: INT

  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+
+ +
+
+
+
+

OnnxLpPool_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLpPool_1(*args, **kwargs)#
+

Version

+

Onnx name: LpPool

+

This version of the operator has been available since +version 1.

+

Summary

+

LpPool consumes an input tensor X and applies Lp pooling across the tensor
according to kernel sizes, stride sizes, and pad lengths. Lp pooling consists of
computing the Lp norm on all values of a subset of the input tensor according to
the kernel size and downsampling the data into the output tensor Y for further
processing.

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. DEPRECATION NOTE: auto_pad is only intended to support legacy uses, and for framework authors, one is explicitly encouraged to use explicit padding specified in the pads attribute. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • p: p value of the Lp norm used to pool over the input data, default is 2.0. Default value is +name: "p" f: 2.0 type: FLOAT

  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimension are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxLpPool_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLpPool_11(*args, **kwargs)#
+

Version

+

Onnx name: LpPool

+

This version of the operator has been available since +version 11.

+

Summary

+

LpPool consumes an input tensor X and applies Lp pooling across +the tensor according to kernel sizes, stride sizes, and pad lengths. +Lp pooling consisting of computing the Lp norm on all values of a subset +of the input tensor according to the kernel size and downsampling the +data into the output tensor Y for further processing.

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • p: p value of the Lp norm used to pool over the input data. Default value is +name: "p" i: 2 type: INT

  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxLpPool_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLpPool_18(*args, **kwargs)#
+

Version

+

Onnx name: LpPool

+

This version of the operator has been available since +version 18.

+

Summary

+

LpPool consumes an input tensor X and applies Lp pooling across +the tensor according to kernel sizes, stride sizes, and pad lengths. +Lp pooling consisting of computing the Lp norm on all values of a subset +of the input tensor according to the kernel size and downsampling the +data into the output tensor Y for further processing. The output spatial shape will be following:

+
output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1)
+
+
+
+

or

+
+

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1)

+
+

if ceil_mode is enabled pad_shape[i] is the sum of pads along axis i.

+

auto_pad is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:

+
VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - {kernelSpatialShape} + 1) / strides_spatial_shape[i])
+SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+
+
+

And pad shape will be following if SAME_UPPER or SAME_LOWER:

+
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + {kernelSpatialShape} - input_spatial_shape[i]
+
+
+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is +name: "ceil_mode" i: 0 type: INT

  • +
  • +
  • +
  • p: p value of the Lp norm used to pool over the input data. Default value is +name: "p" i: 2 type: INT

  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+
+ +
+
+
+
+

OnnxLpPool_2#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxLpPool_2(*args, **kwargs)#
+

Version

+

Onnx name: LpPool

+

This version of the operator has been available since +version 2.

+

Summary

+

LpPool consumes an input tensor X and applies Lp pooling across +the tensor according to kernel sizes, stride sizes, and pad lengths. +Lp pooling consisting of computing the Lp norm on all values of a subset +of the input tensor according to the kernel size and downsampling the +data into the output tensor Y for further processing.

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • p: p value of the Lp norm used to pool over the input data. Default value is +name: "p" i: 2 type: INT

  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMatMul#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMatMul(*args, **kwargs)#
+

Version

+

Onnx name: MatMul

+

This version of the operator has been available since +version 13.

+

Summary

+

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html
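As an illustration, the class can be used on its own to build a small ONNX graph. The sketch below follows the usual skl2onnx algebra pattern (input names or constants as positional arguments, to_onnx to produce the model); the variable names and the opset value are assumptions to adapt to your environment:

    import numpy as np
    from skl2onnx.algebra.onnx_ops import OnnxMatMul

    # Multiply the runtime input 'X' by a fixed 2x2 matrix.
    weights = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
    node = OnnxMatMul('X', weights, op_version=13, output_names=['Y'])
    onx = node.to_onnx({'X': np.zeros((1, 2), dtype=np.float32)})
    # onx can then be run with, e.g., onnxruntime.InferenceSession(onx.SerializeToString()).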

+

Inputs

+
    +
  • A (heterogeneous)T: N-dimensional matrix A

  • +
  • B (heterogeneous)T: N-dimensional matrix B

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Matrix multiply results from A * B

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(bfloat16): Constrain input and output types to float/int tensors.

  • +
+
+ +
+
+
+
+

OnnxMatMulInteger#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMatMulInteger(*args, **kwargs)#
+

Version

+

Onnx name: MatMulInteger

+

This version of the operator has been available since +version 10.

+

Summary

+

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. +The production MUST never overflow. The accumulation may overflow if and only if in 32 bits.
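In NumPy terms the computation, including the optional zero points, can be sketched as follows (an illustrative reference, not the kernel used by a runtime):

    import numpy as np

    def matmul_integer(A, B, a_zero_point=0, b_zero_point=0):
        # Subtract the zero points, then accumulate in int32 as the operator requires.
        a = A.astype(np.int32) - np.int32(a_zero_point)
        b = B.astype(np.int32) - np.int32(b_zero_point)
        return a @ b   # int32 result

    A = np.array([[11, 7], [3, 5]], dtype=np.uint8)
    B = np.array([[1, 4], [2, 3]], dtype=np.uint8)
    print(matmul_integer(A, B, a_zero_point=12))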

+

Inputs

+

Between 2 and 4 inputs.

+
    +
  • A (heterogeneous)T1: N-dimensional matrix A

  • +
  • B (heterogeneous)T2: N-dimensional matrix B

  • +
  • a_zero_point (optional, heterogeneous)T1: Zero point tensor for input ‘A’. It’s optional and default value is 0. It could be a scalar or N-D tensor. Scalar refers to per tensor quantization whereas N-D refers to per row quantization. If the input is 2D of shape [M, K] then zero point tensor may be an M element vector [zp_1, zp_2, …, zp_M]. If the input is N-D tensor with shape [D1, D2, M, K] then zero point tensor may have shape [D1, D2, M, 1].

  • +
  • b_zero_point (optional, heterogeneous)T2: Zero point tensor for input ‘B’. It’s optional and default value is 0. It could be a scalar or a N-D tensor, Scalar refers to per tensor quantization whereas N-D refers to per col quantization. If the input is 2D of shape [K, N] then zero point tensor may be an N element vector [zp_1, zp_2, …, zp_N]. If the input is N-D tensor with shape [D1, D2, K, N] then zero point tensor may have shape [D1, D2, 1, N].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T3: Matrix multiply results from A * B

  • +
+

Type Constraints

+
    +
  • T1 tensor(int8), tensor(uint8): Constrain input A data type to 8-bit integer tensor.

  • +
  • T2 tensor(int8), tensor(uint8): Constrain input B data type to 8-bit integer tensor.

  • +
  • T3 tensor(int32): Constrain output Y data type as 32-bit integer tensor.

  • +
+
+ +
+
+
+
+

OnnxMatMulInteger_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMatMulInteger_10(*args, **kwargs)#
+

Version

+

Onnx name: MatMulInteger

+

This version of the operator has been available since +version 10.

+

Summary

+

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. +The production MUST never overflow. The accumulation may overflow if and only if in 32 bits.

+

Inputs

+

Between 2 and 4 inputs.

+
    +
  • A (heterogeneous)T1: N-dimensional matrix A

  • +
  • B (heterogeneous)T2: N-dimensional matrix B

  • +
  • a_zero_point (optional, heterogeneous)T1: Zero point tensor for input ‘A’. It’s optional and default value is 0. It could be a scalar or N-D tensor. Scalar refers to per tensor quantization whereas N-D refers to per row quantization. If the input is 2D of shape [M, K] then zero point tensor may be an M element vector [zp_1, zp_2, …, zp_M]. If the input is N-D tensor with shape [D1, D2, M, K] then zero point tensor may have shape [D1, D2, M, 1].

  • +
  • b_zero_point (optional, heterogeneous)T2: Zero point tensor for input ‘B’. It’s optional and default value is 0. It could be a scalar or a N-D tensor, Scalar refers to per tensor quantization whereas N-D refers to per col quantization. If the input is 2D of shape [K, N] then zero point tensor may be an N element vector [zp_1, zp_2, …, zp_N]. If the input is N-D tensor with shape [D1, D2, K, N] then zero point tensor may have shape [D1, D2, 1, N].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T3: Matrix multiply results from A * B

  • +
+

Type Constraints

+
    +
  • T1 tensor(int8), tensor(uint8): Constrain input A data type to 8-bit integer tensor.

  • +
  • T2 tensor(int8), tensor(uint8): Constrain input B data type to 8-bit integer tensor.

  • +
  • T3 tensor(int32): Constrain output Y data type as 32-bit integer tensor.

  • +
+
+ +
+
+
+
+

OnnxMatMul_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMatMul_1(*args, **kwargs)#
+

Version

+

Onnx name: MatMul

+

This version of the operator has been available since +version 1.

+

Summary

+

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html

+

Inputs

+
    +
  • A (heterogeneous)T: N-dimensional matrix A

  • +
  • B (heterogeneous)T: N-dimensional matrix B

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Matrix multiply results from A * B

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMatMul_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMatMul_13(*args, **kwargs)#
+

Version

+

Onnx name: MatMul

+

This version of the operator has been available since +version 13.

+

Summary

+

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html

+

Inputs

+
    +
  • A (heterogeneous)T: N-dimensional matrix A

  • +
  • B (heterogeneous)T: N-dimensional matrix B

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Matrix multiply results from A * B

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(bfloat16): Constrain input and output types to float/int tensors.

  • +
+
+ +
+
+
+
+

OnnxMatMul_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMatMul_9(*args, **kwargs)#
+

Version

+

Onnx name: MatMul

+

This version of the operator has been available since +version 9.

+

Summary

+

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html

+

Inputs

+
    +
  • A (heterogeneous)T: N-dimensional matrix A

  • +
  • B (heterogeneous)T: N-dimensional matrix B

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Matrix multiply results from A * B

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64): Constrain input and output types to float/int tensors.

  • +
+
+ +
+
+
+
+

OnnxMax#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMax(*args, **kwargs)#
+

Version

+

Onnx name: Max

+

This version of the operator has been available since +version 13.

+

Summary

+

Element-wise max of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.
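Behaviourally this matches NumPy, for example (illustrative only):

    import numpy as np
    from functools import reduce

    a = np.array([[1.0, 5.0], [3.0, 2.0]], dtype=np.float32)
    b = np.array([4.0, 0.0], dtype=np.float32)   # broadcast against the last axis
    print(np.maximum(a, b))                      # element-wise max with broadcasting
    print(reduce(np.maximum, [a, b, a * 0.5]))   # more than two inputs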

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for max.

  • +
+

Outputs

+
    +
  • max (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMaxPool#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMaxPool(*args, **kwargs)#
+

Version

+

Onnx name: MaxPool

+

This version of the operator has been available since +version 12.

+

Summary

+

MaxPool consumes an input tensor X and applies max pooling across the tensor
according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of
computing the max on all values of a subset of the input tensor according to the
kernel size and downsampling the data into the output tensor Y for further
processing. The output spatial shape will be the following:

output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

or

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

if ceil_mode is enabled. pad_shape[i] is the sum of pads along axis i.

+

auto_pad is a DEPRECATED attribute. If you are still using it, the output spatial shape will be the following:

VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])

And the pad shape will be the following if SAME_UPPER or SAME_LOWER:

pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]

The output of each pooling window is the maximum of the elements in that window, excluding padding.
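Since the shape arithmetic above is easy to get wrong by hand, here is a small Python helper that evaluates the explicit-padding formulas, including dilations and ceil_mode. It is an illustrative sketch, not part of skl2onnx or onnxruntime:

    import math

    def maxpool_output_shape(input_shape, kernel, strides=None, pads=None,
                             dilations=None, ceil_mode=False):
        """Spatial output shape for explicit padding (auto_pad = NOTSET)."""
        n = len(input_shape)
        strides = strides or [1] * n
        dilations = dilations or [1] * n
        pads = pads or [0] * (2 * n)   # ONNX layout: [x1_begin, ..., xn_begin, x1_end, ..., xn_end]
        rounding = math.ceil if ceil_mode else math.floor
        out = []
        for i in range(n):
            pad = pads[i] + pads[i + n]                            # pad_shape[i]
            effective_kernel = (kernel[i] - 1) * dilations[i] + 1  # dilated kernel extent
            out.append(int(rounding((input_shape[i] + pad - effective_kernel) / strides[i] + 1)))
        return out

    # 32x32 input, 3x3 kernel, stride 2, one pixel of padding on every side -> [16, 16]
    print(maxpool_output_shape([32, 32], kernel=[3, 3], strides=[2, 2], pads=[1, 1, 1, 1]))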

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is +name: "ceil_mode" i: 0 type: INT

  • +
  • +
  • +
  • +
  • storage_order: The storage order of the tensor. 0 is row major, and 1 is column major. This attribute is used only to convert an n-tuple index value into a single integer value for producing the second output. Default value is +name: "storage_order" i: 0 type: INT

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • +
  • Indices (optional, heterogeneous)I: Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x … x Dn).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(uint8): Constrain input and output types to float and 8 bit tensors.

  • +
  • I tensor(int64): Constrain index tensor to int64

  • +
+
+
+ +
+
+
+
+

OnnxMaxPool_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMaxPool_1(*args, **kwargs)#
+

Version

+

Onnx name: MaxPool

+

This version of the operator has been available since +version 1.

+

Summary

+

MaxPool consumes an input tensor X and applies max pooling across +the tensor according to kernel sizes, stride sizes, and pad lengths. +max pooling consisting of computing the max on all values of a +subset of the input tensor according to the kernel size and downsampling the +data into the output tensor Y for further processing. The output spatial shape will be following:

+
output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)
+
+* pad_shape[i] is sum of pads along axis i
+
+
+

auto_pad is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:

+
VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
+SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+
+
+

And pad shape will be following if SAME_UPPER or SAME_LOWER:

+
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]
+
+
+

The output of each pooling window is the maximum of the elements in that window, excluding padding.

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size matches the input. In case of an odd padding amount, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID means no padding. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMaxPool_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMaxPool_10(*args, **kwargs)#
+

Version

+

Onnx name: MaxPool

+

This version of the operator has been available since +version 10.

+

Summary

+

MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of computing the max over all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is computed as follows:

+
output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

or, if ceil_mode is enabled,

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.
+
+
+

auto_pad is a DEPRECATED attribute. If you are still using it, the output spatial shape will be as follows:

+
VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])
+SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+
+
+

And pad shape will be following if SAME_UPPER or SAME_LOWER:

+
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]
+
+
+

The output of each pooling window is the maximum of the elements in that window, excluding padding.

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size matches the input. In case of an odd padding amount, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID means no padding. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is +name: "ceil_mode" i: 0 type: INT

  • +
  • +
  • +
  • +
  • storage_order: The storage order of the tensor. 0 is row major, and 1 is column major. Default value is +name: "storage_order" i: 0 type: INT

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • +
  • Indices (optional, heterogeneous)I: Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as the output tensor. The values in indices are the indices of the selected values during pooling. The indices are computed as if the input were a flattened 1-D tensor, and they do not consider padding. So the values in indices are in [0, N x C x D1 x … x Dn).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • I tensor(int64): Constrain index tensor to int64

  • +
+
+
+ +
+
+
+
+

OnnxMaxPool_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMaxPool_11(*args, **kwargs)#
+

Version

+

Onnx name: MaxPool

+

This version of the operator has been available since +version 11.

+

Summary

+

MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of computing the max over all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is computed as follows:

+
output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

or, if ceil_mode is enabled,

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.
+
+
+

auto_pad is a DEPRECATED attribute. If you are still using it, the output spatial shape will be as follows:

+
VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])
+SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+
+
+

And pad shape will be following if SAME_UPPER or SAME_LOWER:

+
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]
+
+
+

The output of each pooling window is the maximum of the elements in that window, excluding padding.

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size matches the input. In case of an odd padding amount, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID means no padding. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is +name: "ceil_mode" i: 0 type: INT

  • +
  • +
  • +
  • +
  • storage_order: The storage order of the tensor. 0 is row major, and 1 is column major. Default value is +name: "storage_order" i: 0 type: INT

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • +
  • Indices (optional, heterogeneous)I: Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as the output tensor. The values in indices are the indices of the selected values during pooling. The indices are computed as if the input were a flattened 1-D tensor, and they do not consider padding. So the values in indices are in [0, N x C x D1 x … x Dn).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • I tensor(int64): Constrain index tensor to int64

  • +
+
+
+ +
+
+
+
+

OnnxMaxPool_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMaxPool_12(*args, **kwargs)#
+

Version

+

Onnx name: MaxPool

+

This version of the operator has been available since +version 12.

+

Summary

+

MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of computing the max over all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is computed as follows:

+
output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

or, if ceil_mode is enabled,

output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.

+

auto_pad is a DEPRECATED attribute. If you are still using it, the output spatial shape will be as follows:

+
VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])
+SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+
+
+

And pad shape will be following if SAME_UPPER or SAME_LOWER:

+
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]
+
+
+

The output of each pooling window is the maximum of the elements in that window, excluding padding.

+
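To make the shape arithmetic above concrete, the small Python sketch below (an illustration only, not part of the operator definition) evaluates the output size along one spatial axis, including the dilation term and the ceil_mode switch.

import math

def maxpool_out_dim(in_dim, kernel, stride=1, pad_total=0, dilation=1, ceil_mode=False):
    # pad_total stands for pad_shape[i], the sum of the pads on both ends of axis i.
    effective_kernel = (kernel - 1) * dilation + 1
    value = (in_dim + pad_total - effective_kernel) / stride + 1
    return math.ceil(value) if ceil_mode else math.floor(value)

# 32 elements, 3-wide kernel, stride 2, one element of padding on each side:
print(maxpool_out_dim(32, kernel=3, stride=2, pad_total=2))                   # 16
print(maxpool_out_dim(32, kernel=3, stride=2, pad_total=2, ceil_mode=True))   # 17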

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • ceil_mode: Whether to use ceil or floor (default) to compute the output shape. Default value is +name: "ceil_mode" i: 0 type: INT

  • +
  • +
  • +
  • +
  • storage_order: The storage order of the tensor. 0 is row major, and 1 is column major. This attribute is used only to convert an n-tuple index value into a single integer value for producing the second output. Default value is +name: "storage_order" i: 0 type: INT

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • +
  • Indices (optional, heterogeneous)I: Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as the output tensor. The values in indices are the indices of the selected values during pooling. The indices are computed as if the input were a flattened 1-D tensor, and they do not consider padding. So the values in indices are in [0, N x C x D1 x … x Dn).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(uint8): Constrain input and output types to float and 8 bit tensors.

  • +
  • I tensor(int64): Constrain index tensor to int64

  • +
+
+
+ +
+
+
+
+

OnnxMaxPool_8#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMaxPool_8(*args, **kwargs)#
+

Version

+

Onnx name: MaxPool

+

This version of the operator has been available since +version 8.

+

Summary

+

MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of computing the max over all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is computed as follows:

+
output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)

where pad_shape[i] is the sum of pads along axis i.
+
+
+

auto_pad is a DEPRECATED attribute. If you are still using it, the output spatial shape will be as follows:

+
VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
+SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+
+
+

And pad shape will be following if SAME_UPPER or SAME_LOWER:

+
pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]
+
+
+

The output of each pooling window is the maximum of the elements in that window, excluding padding.

+

Attributes

+
    +
  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. The default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size matches the input. In case of an odd padding amount, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID means no padding. Default value is +name: "auto_pad" s: "NOTSET" type: STRING

  • +
  • +
  • +
  • storage_order: The storage order of the tensor. 0 is row major, and 1 is column major. Default value is +name: "storage_order" i: 0 type: INT

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • Y (heterogeneous)T: Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used

  • +
  • Indices (optional, heterogeneous)I: Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as the output tensor. The values in indices are the indices of the selected values during pooling. The indices are computed as if the input were a flattened 1-D tensor, and they do not consider padding. So the values in indices are in [0, N x C x D1 x … x Dn).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • I tensor(int64): Constrain index tensor to int64

  • +
+
+ +
+
+
+
+

OnnxMaxRoiPool#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMaxRoiPool(*args, **kwargs)#
+

Version

+

Onnx name: MaxRoiPool

+

This version of the operator has been available since +version 1.

+

Summary

+

ROI max pool consumes an input tensor X and regions of interest (RoIs) to apply max pooling across each RoI, producing an output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1]).

+

Attributes

+
    +
  • +
  • spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates from their input scale to the scale used when pooling. Default value is +name: "spatial_scale" f: 1.0 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.

  • +
  • rois (heterogeneous)T: RoIs (Regions of Interest) to pool over. Should be a 2-D tensor of shape (num_rois, 5) given as [[batch_id, x1, y1, x2, y2], …].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: RoI pooled output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1]).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMaxRoiPool_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMaxRoiPool_1(*args, **kwargs)#
+

Version

+

Onnx name: MaxRoiPool

+

This version of the operator has been available since +version 1.

+

Summary

+

ROI max pool consumes an input tensor X and regions of interest (RoIs) to apply max pooling across each RoI, producing an output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1]).

+

Attributes

+
    +
  • +
  • spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates from their input scale to the scale used when pooling. Default value is +name: "spatial_scale" f: 1.0 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.

  • +
  • rois (heterogeneous)T: RoIs (Regions of Interest) to pool over. Should be a 2-D tensor of shape (num_rois, 5) given as [[batch_id, x1, y1, x2, y2], …].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: RoI pooled output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1]).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMaxUnpool#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMaxUnpool(*args, **kwargs)#
+

Version

+

Onnx name: MaxUnpool

+

This version of the operator has been available since +version 11.

+

Summary

+
+
MaxUnpool essentially computes the partial inverse of the MaxPool op. The input to this op is typically the output of a MaxPool op. The first input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) from MaxPool. The second input tensor, I, contains the indices of the (locally maximal) elements corresponding to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. The third (optional) input is a tensor that specifies the output size of the unpooling operation.

MaxUnpool is intended to do a ‘partial’ inverse of the MaxPool op: ‘partial’ because all the non-maximal values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling the result of an unpooling operation should give back the original input to the unpooling op.

MaxUnpool can produce the same output size for several input sizes, which makes the unpooling op ambiguous. The third input argument, output_shape, is meant to disambiguate the op and produce an output tensor of known/predictable size.

In addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads, which define the exact unpooling op. The attributes typically have the same values as the corresponding pooling op that the unpooling op is trying to invert.

+
+
+
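The numpy sketch below (an illustration only, not the skl2onnx API) shows the MaxPool/MaxUnpool relationship described above on a 1-D example with a 2-element window and stride 2: the pooled values are scattered back to the positions given by the indices, and every other position becomes zero.

import numpy as np

x = np.array([1., 6., 3., 4., 7., 2.], dtype=np.float32)

# MaxPool with kernel 2, stride 2: keep the max of each window and its flat index.
pooled = np.array([x[2 * i:2 * i + 2].max() for i in range(3)])
indices = np.array([2 * i + x[2 * i:2 * i + 2].argmax() for i in range(3)])

# MaxUnpool: scatter the pooled values back; non-maximal positions become zero.
unpooled = np.zeros_like(x)
unpooled[indices] = pooled

print(pooled)    # [6. 4. 7.]
print(indices)   # [1 3 4]
print(unpooled)  # [0. 6. 0. 4. 7. 0.]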

Attributes

+
    +
  • +
  • +
  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • X (heterogeneous)T1: Input data tensor that has to be unpooled. This tensor is typically the first output of the MaxPool op. Dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non-image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
  • I (heterogeneous)T2: Input data tensor containing the indices corresponding to elements in the first input tensor X. This tensor is typically the second output of the MaxPool op. Dimensions must be the same as the input tensor X. The indices are linear, i.e. computed considering the tensor as a flattened 1-D tensor, assuming row-major storage. Also, the linear indices should not consider padding. So the values in indices are in the range [0, N x C x D1 x … x Dn).

  • +
  • output_shape (optional, heterogeneous)T2: The shape of the output can be explicitly set which will cause pads values to be auto generated. If ‘output_shape’ is specified, ‘pads’ values are ignored.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T1: Output data tensor that contains the result of the unpooling.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T2 tensor(int64): Constrain index tensor to int64

  • +
+
+ +
+
+
+
+

OnnxMaxUnpool_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMaxUnpool_11(*args, **kwargs)#
+

Version

+

Onnx name: MaxUnpool

+

This version of the operator has been available since +version 11.

+

Summary

+
+
MaxUnpool essentially computes the partial inverse of the MaxPool op. The input to this op is typically the output of a MaxPool op. The first input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) from MaxPool. The second input tensor, I, contains the indices of the (locally maximal) elements corresponding to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. The third (optional) input is a tensor that specifies the output size of the unpooling operation.

MaxUnpool is intended to do a ‘partial’ inverse of the MaxPool op: ‘partial’ because all the non-maximal values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling the result of an unpooling operation should give back the original input to the unpooling op.

MaxUnpool can produce the same output size for several input sizes, which makes the unpooling op ambiguous. The third input argument, output_shape, is meant to disambiguate the op and produce an output tensor of known/predictable size.

In addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads, which define the exact unpooling op. The attributes typically have the same values as the corresponding pooling op that the unpooling op is trying to invert.

+
+
+

Attributes

+
    +
  • +
  • +
  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • X (heterogeneous)T1: Input data tensor that has to be unpooled. This tensor is typically the first output of the MaxPool op. Dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non-image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
  • I (heterogeneous)T2: Input data tensor containing the indices corresponding to elements in the first input tensor X. This tensor is typically the second output of the MaxPool op. Dimensions must be the same as the input tensor X. The indices are linear, i.e. computed considering the tensor as a flattened 1-D tensor, assuming row-major storage. Also, the linear indices should not consider padding. So the values in indices are in the range [0, N x C x D1 x … x Dn).

  • +
  • output_shape (optional, heterogeneous)T2: The shape of the output can be explicitly set which will cause pads values to be auto generated. If ‘output_shape’ is specified, ‘pads’ values are ignored.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T1: Output data tensor that contains the result of the unpooling.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T2 tensor(int64): Constrain index tensor to int64

  • +
+
+ +
+
+
+
+

OnnxMaxUnpool_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMaxUnpool_9(*args, **kwargs)#
+

Version

+

Onnx name: MaxUnpool

+

This version of the operator has been available since +version 9.

+

Summary

+
+
MaxUnpool essentially computes the partial inverse of the MaxPool op. The input to this op is typically the output of a MaxPool op. The first input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) from MaxPool. The second input tensor, I, contains the indices of the (locally maximal) elements corresponding to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. The third (optional) input is a tensor that specifies the output size of the unpooling operation.

MaxUnpool is intended to do a ‘partial’ inverse of the MaxPool op: ‘partial’ because all the non-maximal values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling the result of an unpooling operation should give back the original input to the unpooling op.

MaxUnpool can produce the same output size for several input sizes, which makes the unpooling op ambiguous. The third input argument, output_shape, is meant to disambiguate the op and produce an output tensor of known/predictable size.

In addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads, which define the exact unpooling op. The attributes typically have the same values as the corresponding pooling op that the unpooling op is trying to invert.

+
+
+

Attributes

+
    +
  • +
  • +
  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • X (heterogeneous)T1: Input data tensor that has to be unpooled. This tensor is typically the first output of the MaxPool op. Dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non-image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
  • I (heterogeneous)T2: Input data tensor containing the indices corresponding to elements in the first input tensor X. This tensor is typically the second output of the MaxPool op. Dimensions must be the same as the input tensor X. The indices are linear, i.e. computed considering the tensor as a flattened 1-D tensor, assuming row-major storage. Also, the linear indices should not consider padding. So the values in indices are in the range [0, N x C x D1 x … x Dn).

  • +
  • output_shape (optional, heterogeneous)T2: The shape of the output can be explicitly set which will cause pads values to be auto generated. If ‘output_shape’ is specified, ‘pads’ values are ignored.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T1: Output data tensor that contains the result of the unpooling.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T2 tensor(int64): Constrain index tensor to int64

  • +
+
+ +
+
+
+
+

OnnxMax_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMax_1(*args, **kwargs)#
+

Version

+

Onnx name: Max

+

This version of the operator has been available since +version 1.

+

Summary

+

Element-wise max of each of the input tensors. All inputs and outputs must +have the same shape and data type.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for Max.

  • +
+

Outputs

+
    +
  • max (heterogeneous)T: Output tensor. Same dimension as inputs.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMax_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMax_12(*args, **kwargs)#
+

Version

+

Onnx name: Max

+

This version of the operator has been available since +version 12.

+

Summary

+

Element-wise max of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for max.

  • +
+

Outputs

+
    +
  • max (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMax_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMax_13(*args, **kwargs)#
+

Version

+

Onnx name: Max

+

This version of the operator has been available since +version 13.

+

Summary

+

Element-wise max of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+
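The following numpy sketch (an illustration only) mirrors what the variadic Max computes, including multidirectional broadcasting between inputs of different shapes.

import numpy as np

a = np.array([[1., 5.], [3., 2.]], dtype=np.float32)   # shape (2, 2)
b = np.array([4., 0.], dtype=np.float32)               # shape (2,), broadcast over rows
c = np.float32(2.5)                                    # scalar, broadcast everywhere

# Element-wise max over all inputs, like Max(a, b, c) in ONNX.
result = np.maximum.reduce(np.broadcast_arrays(a, b, c))
print(result)   # [[4.  5. ]
                #  [4.  2.5]]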

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for max.

  • +
+

Outputs

+
    +
  • max (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMax_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMax_6(*args, **kwargs)#
+

Version

+

Onnx name: Max

+

This version of the operator has been available since +version 6.

+

Summary

+

Element-wise max of each of the input tensors. All inputs and outputs must +have the same shape and data type.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for Max.

  • +
+

Outputs

+
    +
  • max (heterogeneous)T: Output tensor. Same dimension as inputs.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMax_8#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMax_8(*args, **kwargs)#
+

Version

+

Onnx name: Max

+

This version of the operator has been available since +version 8.

+

Summary

+

Element-wise max of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for max.

  • +
+

Outputs

+
    +
  • max (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMean#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMean(*args, **kwargs)#
+

Version

+

Onnx name: Mean

+

This version of the operator has been available since +version 13.

+

Summary

+

Element-wise mean of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for mean.

  • +
+

Outputs

+
    +
  • mean (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMeanVarianceNormalization#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMeanVarianceNormalization(*args, **kwargs)#
+

Version

+

Onnx name: MeanVarianceNormalization

+

This version of the operator has been available since +version 13.

+

Summary

+

A MeanVarianceNormalization Function: Perform mean variance normalization on the input tensor X using the formula: (X-EX)/sqrt(E(X-EX)^2)

+
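A numpy sketch (an illustration only) of this formula with the default axes [0, 2, 3], i.e. per-channel mean and variance on an N x C x H x W tensor:

import numpy as np

def mean_variance_normalization(x, axes=(0, 2, 3)):
    # (X - E[X]) / sqrt(E[(X - E[X])^2]), reducing over the given axes.
    mean = x.mean(axis=axes, keepdims=True)
    variance = ((x - mean) ** 2).mean(axis=axes, keepdims=True)
    return (x - mean) / np.sqrt(variance)

x = np.random.rand(2, 3, 4, 4).astype(np.float32)   # N x C x H x W
y = mean_variance_normalization(x)
# Each channel of y now has (approximately) zero mean and unit variance.
print(y.mean(axis=(0, 2, 3)), y.std(axis=(0, 2, 3)))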

Attributes

+
    +
  • axes: A list of integers, along which to reduce. The default is to calculate along axes [0,2,3], i.e. the mean and variance are computed along each channel. Two variables with the same C-coordinate are associated with the same mean and variance. Default value is +name: "axes" ints: 0 ints: 2 ints: 3 type: INTS

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMeanVarianceNormalization_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMeanVarianceNormalization_13(*args, **kwargs)#
+

Version

+

Onnx name: MeanVarianceNormalization

+

This version of the operator has been available since +version 13.

+

Summary

+

A MeanVarianceNormalization Function: Perform mean variance normalization on the input tensor X using the formula: (X-EX)/sqrt(E(X-EX)^2)

+

Attributes

+
    +
  • axes: A list of integers, along which to reduce. The default is to calculate along axes [0,2,3], i.e. the mean and variance are computed along each channel. Two variables with the same C-coordinate are associated with the same mean and variance. Default value is +name: "axes" ints: 0 ints: 2 ints: 3 type: INTS

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMeanVarianceNormalization_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMeanVarianceNormalization_9(*args, **kwargs)#
+

Version

+

Onnx name: MeanVarianceNormalization

+

This version of the operator has been available since +version 9.

+

Summary

+

A MeanVarianceNormalization Function: Perform mean variance normalization on the input tensor X using the formula: (X-EX)/sqrt(E(X-EX)^2)

+

Attributes

+
    +
  • axes: A list of integers, along which to reduce. The default is to calculate along axes [0,2,3], i.e. the mean and variance are computed along each channel. Two variables with the same C-coordinate are associated with the same mean and variance. Default value is +name: "axes" ints: 0 ints: 2 ints: 3 type: INTS

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMean_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMean_1(*args, **kwargs)#
+

Version

+

Onnx name: Mean

+

This version of the operator has been available since +version 1.

+

Summary

+

Element-wise mean of each of the input tensors. All inputs and outputs must +have the same shape and data type.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for Mean.

  • +
+

Outputs

+
    +
  • mean (heterogeneous)T: Output tensor. Same dimension as inputs.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMean_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMean_13(*args, **kwargs)#
+

Version

+

Onnx name: Mean

+

This version of the operator has been available since +version 13.

+

Summary

+

Element-wise mean of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for mean.

  • +
+

Outputs

+
    +
  • mean (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMean_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMean_6(*args, **kwargs)#
+

Version

+

Onnx name: Mean

+

This version of the operator has been available since +version 6.

+

Summary

+

Element-wise mean of each of the input tensors. All inputs and outputs must +have the same shape and data type.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for Mean.

  • +
+

Outputs

+
    +
  • mean (heterogeneous)T: Output tensor. Same dimension as inputs.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMean_8#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMean_8(*args, **kwargs)#
+

Version

+

Onnx name: Mean

+

This version of the operator has been available since +version 8.

+

Summary

+

Element-wise mean of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for mean.

  • +
+

Outputs

+
    +
  • mean (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMelWeightMatrix#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMelWeightMatrix(*args, **kwargs)#
+

Version

+

Onnx name: MelWeightMatrix

+

This version of the operator has been available since +version 17.

+

Summary

+

Generate a MelWeightMatrix that can be used to re-weight a Tensor containing linearly sampled frequency spectra (from a DFT or STFT) into num_mel_bins bands of frequency information based on the [lower_edge_hertz, upper_edge_hertz] range on the mel scale. This function defines the mel scale in terms of a frequency in hertz according to the following formula:

+
+

mel(f) = 2595 * log10(1 + f/700)

+
+

In the returned matrix, all the triangles (filterbanks) have a peak value of 1.0.

+

The returned MelWeightMatrix can be used to right-multiply a spectrogram S of shape [frames, num_spectrogram_bins] of linear scale spectrum values (e.g. STFT magnitudes) to generate a “mel spectrogram” M of shape [frames, num_mel_bins].

+
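A short numpy sketch (an illustration only) of the hertz-to-mel mapping above, together with the bin frequencies it is typically applied to; the sample rate and DFT length are arbitrary example values:

import numpy as np

def hz_to_mel(f_hz):
    # mel(f) = 2595 * log10(1 + f / 700)
    return 2595.0 * np.log10(1.0 + f_hz / 700.0)

sample_rate = 16000                                 # example values only
dft_length = 512
num_bins = dft_length // 2 + 1                      # floor(dft_length/2) + 1 one-sided bins
bin_hz = np.arange(num_bins) * sample_rate / dft_length

print(hz_to_mel(np.array([0.0, 700.0, 8000.0])))    # [0., ~781.2, ~2840.]
print(bin_hz[:4])                                   # [ 0.    31.25  62.5   93.75]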

Attributes

+
    +
  • output_datatype: The data type of the output tensor. Strictly must be one of the values from DataType enum in TensorProto whose values correspond to T3. The default value is 1 = FLOAT. Default value is +name: "output_datatype" i: 1 type: INT

  • +
+

Inputs

+
    +
  • num_mel_bins (heterogeneous)T1: The number of bands in the mel spectrum.

  • +
  • dft_length (heterogeneous)T1: The size of the original DFT. The size of the original DFT is used to infer the size of the onesided DFT, which is understood to be floor(dft_length/2) + 1, i.e. the spectrogram only contains the nonredundant DFT bins.

  • +
  • sample_rate (heterogeneous)T1: Samples per second of the input signal used to create the spectrogram. Used to figure out the frequencies corresponding to each spectrogram bin, which dictates how they are mapped into the mel scale.

  • +
  • lower_edge_hertz (heterogeneous)T2: Lower bound on the frequencies to be included in the mel spectrum. This corresponds to the lower edge of the lowest triangular band.

  • +
  • upper_edge_hertz (heterogeneous)T2: The desired top edge of the highest frequency band.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T3: The Mel Weight Matrix. The output has the shape: [floor(dft_length/2) + 1][num_mel_bins].

  • +
+

Type Constraints

+
    +
  • T1 tensor(int32), tensor(int64): Constrain to integer tensors.

  • +
  • T2 tensor(float), tensor(float16), tensor(double), tensor(bfloat16): Constrain to float tensors

  • +
  • T3 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain to any numerical types.

  • +
+
+ +
+
+
+
+

OnnxMelWeightMatrix_17#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMelWeightMatrix_17(*args, **kwargs)#
+

Version

+

Onnx name: MelWeightMatrix

+

This version of the operator has been available since +version 17.

+

Summary

+

Generate a MelWeightMatrix that can be used to re-weight a Tensor containing linearly sampled frequency spectra (from a DFT or STFT) into num_mel_bins bands of frequency information based on the [lower_edge_hertz, upper_edge_hertz] range on the mel scale. This function defines the mel scale in terms of a frequency in hertz according to the following formula:

+
+

mel(f) = 2595 * log10(1 + f/700)

+
+

In the returned matrix, all the triangles (filterbanks) have a peak value of 1.0.

+

The returned MelWeightMatrix can be used to right-multiply a spectrogram S of shape [frames, num_spectrogram_bins] of linear scale spectrum values (e.g. STFT magnitudes) to generate a “mel spectrogram” M of shape [frames, num_mel_bins].

+

Attributes

+
    +
  • output_datatype: The data type of the output tensor. Strictly must be one of the values from DataType enum in TensorProto whose values correspond to T3. The default value is 1 = FLOAT. Default value is +name: "output_datatype" i: 1 type: INT

  • +
+

Inputs

+
    +
  • num_mel_bins (heterogeneous)T1: The number of bands in the mel spectrum.

  • +
  • dft_length (heterogeneous)T1: The size of the original DFT. The size of the original DFT is used to infer the size of the onesided DFT, which is understood to be floor(dft_length/2) + 1, i.e. the spectrogram only contains the nonredundant DFT bins.

  • +
  • sample_rate (heterogeneous)T1: Samples per second of the input signal used to create the spectrogram. Used to figure out the frequencies corresponding to each spectrogram bin, which dictates how they are mapped into the mel scale.

  • +
  • lower_edge_hertz (heterogeneous)T2: Lower bound on the frequencies to be included in the mel spectrum. This corresponds to the lower edge of the lowest triangular band.

  • +
  • upper_edge_hertz (heterogeneous)T2: The desired top edge of the highest frequency band.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T3: The Mel Weight Matrix. The output has the shape: [floor(dft_length/2) + 1][num_mel_bins].

  • +
+

Type Constraints

+
    +
  • T1 tensor(int32), tensor(int64): Constrain to integer tensors.

  • +
  • T2 tensor(float), tensor(float16), tensor(double), tensor(bfloat16): Constrain to float tensors

  • +
  • T3 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain to any numerical types.

  • +
+
+ +
+
+
+
+

OnnxMin#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMin(*args, **kwargs)#
+

Version

+

Onnx name: Min

+

This version of the operator has been available since +version 13.

+

Summary

+

Element-wise min of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for min.

  • +
+

Outputs

+
    +
  • min (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMin_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMin_1(*args, **kwargs)#
+

Version

+

Onnx name: Min

+

This version of the operator has been available since +version 1.

+

Summary

+

Element-wise min of each of the input tensors. All inputs and outputs must +have the same shape and data type.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for Min

  • +
+

Outputs

+
    +
  • min (heterogeneous)T: Output tensor. Same dimension as inputs.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMin_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMin_12(*args, **kwargs)#
+

Version

+

Onnx name: Min

+

This version of the operator has been available since +version 12.

+

Summary

+

Element-wise min of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for min.

  • +
+

Outputs

+
    +
  • min (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMin_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMin_13(*args, **kwargs)#
+

Version

+

Onnx name: Min

+

This version of the operator has been available since +version 13.

+

Summary

+

Element-wise min of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for min.

  • +
+

Outputs

+
    +
  • min (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMin_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMin_6(*args, **kwargs)#
+

Version

+

Onnx name: Min

+

This version of the operator has been available since +version 6.

+

Summary

+

Element-wise min of each of the input tensors. All inputs and outputs must +have the same shape and data type.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for Min

  • +
+

Outputs

+
    +
  • min (heterogeneous)T: Output tensor. Same dimension as inputs.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMin_8#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMin_8(*args, **kwargs)#
+

Version

+

Onnx name: Min

+

This version of the operator has been available since +version 8.

+

Summary

+

Element-wise min of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for min.

  • +
+

Outputs

+
    +
  • min (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMish#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMish(*args, **kwargs)#
+

Version

+

Onnx name: Mish

+

This version of the operator has been available since +version 18.

+

Summary

+

Mish: A Self Regularized Non-Monotonic Neural Activation Function.

+

Perform the linear unit element-wise on the input tensor X using formula:

+
mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x}))
+
+
+
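A numpy sketch (an illustration only) of this formula; the naive softplus used here is fine for moderate inputs:

import numpy as np

def mish(x):
    # mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x))
    return x * np.tanh(np.log1p(np.exp(x)))

x = np.array([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=np.float32)
print(mish(x))   # approximately [-0.1456 -0.3034  0.      0.8651  2.9866]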

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input X and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMish_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMish_18(*args, **kwargs)#
+

Version

+

Onnx name: Mish

+

This version of the operator has been available since +version 18.

+

Summary

+

Mish: A Self Regularized Non-Monotonic Neural Activation Function.

+

Perform the linear unit element-wise on the input tensor X using formula:

+
mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x}))
+
+
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input X and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMod#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMod(*args, **kwargs)#
+

Version

+

Onnx name: Mod

+

This version of the operator has been available since +version 13.

+

Summary

+

Performs element-wise binary modulus (with Numpy-style broadcasting support). +The sign of the remainder is the same as that of the Divisor.

+

The Mod operator can also behave like C fmod() or numpy.fmod. In this case, however, the sign of the remainder will be the same as the Dividend (in contrast to integer mod). To force a behavior like numpy.fmod(), an ‘fmod’ attribute is provided. This attribute is set to 0 by default, causing the behavior to be like integer mod. Setting this attribute to 1 causes the remainder to be calculated similarly to numpy.fmod().

+

If the input type is floating point, then fmod attribute must be set to 1.

+

In case of dividend being zero, the results will be platform dependent.

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+
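The numpy sketch below (an illustration only) contrasts the two behaviours: integer mod (fmod=0), whose result takes the sign of the divisor, versus fmod (fmod=1), whose result takes the sign of the dividend.

import numpy as np

a = np.array([-7, 7, -7, 7])    # dividends
b = np.array([3, 3, -3, -3])    # divisors

# fmod=0: like Python's % / np.mod, sign follows the divisor.
print(np.mod(a, b))    # [ 2  1 -1 -2]

# fmod=1: like C fmod() / np.fmod, sign follows the dividend.
print(np.fmod(a, b))   # [-1  1 -1  1]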

Attributes

+
    +
  • fmod: Whether the operator should behave like fmod (default=0 meaning it will do integer mods); Set this to 1 to force fmod treatment Default value is +name: "fmod" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: Dividend tensor

  • +
  • B (heterogeneous)T: Divisor tensor

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Remainder tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMod_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMod_10(*args, **kwargs)#
+

Version

+

Onnx name: Mod

+

This version of the operator has been available since +version 10.

+

Summary

+
+
Performs element-wise binary modulus (with Numpy-style broadcasting support).

The sign of the remainder is the same as that of the Divisor.

+

The Mod operator can also behave like C fmod() or numpy.fmod. In this case, however, the sign of the remainder will be the same as the Dividend (in contrast to integer mod). To force a behavior like numpy.fmod(), an ‘fmod’ attribute is provided. This attribute is set to 0 by default, causing the behavior to be like integer mod. Setting this attribute to 1 causes the remainder to be calculated similarly to numpy.fmod().

+

If the input type is floating point, then fmod attribute must be set to 1.

+

In case of dividend being zero, the results will be platform dependent.

+
+
+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Attributes

+
    +
  • fmod: Whether the operator should behave like fmod (default=0 meaning it will do integer mods); Set this to 1 to force fmod treatment Default value is +name: "fmod" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: Dividend tensor

  • +
  • B (heterogeneous)T: Divisor tensor

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Remainder tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMod_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMod_13(*args, **kwargs)#
+

Version

+

Onnx name: Mod

+

This version of the operator has been available since +version 13.

+

Summary

+

Performs element-wise binary modulus (with Numpy-style broadcasting support). +The sign of the remainder is the same as that of the Divisor.

+

The Mod operator can also behave like C fmod() or numpy.fmod. In this case, however, the sign of the remainder will be the same as the Dividend (in contrast to integer mod). To force a behavior like numpy.fmod(), an ‘fmod’ attribute is provided. This attribute is set to 0 by default, causing the behavior to be like integer mod. Setting this attribute to 1 causes the remainder to be calculated similarly to numpy.fmod().

+

If the input type is floating point, then fmod attribute must be set to 1.

+

In case of dividend being zero, the results will be platform dependent.

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Attributes

+
    +
  • fmod: Whether the operator should behave like fmod (default=0, meaning it performs integer mod); set this to 1 to force fmod behavior. Default value is name: "fmod" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: Dividend tensor

  • +
  • B (heterogeneous)T: Divisor tensor

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Remainder tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMomentum#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMomentum(*args, **kwargs)#
+

Version

+

Onnx name: Momentum

+

This version of the operator has been available since +version 1 of domain ai.onnx.preview.training.

+

Summary

+

Compute one iteration of stochastic gradient update with momentum. This operator can conduct the optimization of multiple tensor variables.

+

Let’s define the behavior of this operator. As you can imagine, SG with momentum requires several parameters:

+
+
    +
  • The learning-rate “R”.

  • +
  • The update count “T”. That is, the number of conducted training iterations. It should be zero in the first training iteration.

  • +
  • A L2-norm regularization coefficient “norm_coefficient”.

  • +
  • A decay coefficient of previous accumulated gradient (i.e., momentum) “alpha”.

  • +
  • The scaling coefficient of current gradient “beta”.

  • +
  • An attribute “mode” that chooses whether standard momentum or Nesterov’s momentum should be used.

  • +
+
+

For the sake of simplicity, assume that there is only one tensor (called “X”) to be optimized. Other necessary inputs are the gradient of “X” (called “G”) and the momentum of “X” (called “V”). This Momentum operator maps all these inputs to the new value of “X” (called “X_new”) and its new momentum (called “V_new”).

+

This operator supports two different momentum algorithms. Set the attribute “mode” to “nesterov” if Nesterov’s momentum is desired. Otherwise, set the attribute “mode” to “standard” to use standard momentum. Computation details are described subsequently.

+

Let “+”, “-”, “*”, and “/” be element-wise operations with numpy-style broadcasting.

+

Pseudo code for SG with standard momentum:

+
+

// Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared
// values of all elements in X.
G_regularized = norm_coefficient * X + G

// In the first training iteration, beta should always be 1.
beta_adjusted = T > 0 ? beta : 1

// Compute the current momentum based on previous momentum and the current gradient.
V_new = alpha * V + beta_adjusted * G_regularized

// Update X.
X_new = X - R * V_new

+
+

Pseudo code for SG with Nesterov’s momentum:

+
+

// Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared
// values of all elements in X.
G_regularized = norm_coefficient * X + G;

// In the first training iteration, beta should always be 1.
beta_adjusted = T > 0 ? beta : 1

// Compute the current momentum based on previous momentum and the current gradient.
V_new = alpha * V + beta_adjusted * G_regularized;

// Compute final update direction and then update X.
X_new = X - R * (G_regularized + alpha * V_new)

+
+

If one assigns this operator to optimize multiple inputs, for example “X_1” and “X_2”, the same pseudo code is extended to handle all tensors jointly. More specifically, we can view “X” as a concatenation of “X_1” and “X_2” (of course, their gradients and accumulated gradients should be concatenated too) and then the pseudo code above becomes applicable.
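The pseudo code above translates almost directly into numpy; the following is only a sketch of a single standard-momentum step (the actual update is carried out by the runtime implementing the operator):

import numpy as np

def momentum_step(r, t, x, g, v, alpha=0.9, beta=1.0, norm_coefficient=0.0):
    # Standard momentum, transcribed from the pseudo code above.
    g_regularized = norm_coefficient * x + g
    beta_adjusted = beta if t > 0 else 1.0
    v_new = alpha * v + beta_adjusted * g_regularized
    x_new = x - r * v_new
    return x_new, v_new

x = np.array([1.0, -2.0], dtype=np.float32)
g = np.array([0.5, 0.5], dtype=np.float32)
v = np.zeros_like(x)
x_new, v_new = momentum_step(r=0.1, t=0, x=x, g=g, v=v)
print(x_new, v_new)    # [ 0.95 -2.05] [0.5 0.5]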

+

Attributes

+
    +
  • +
  • +
  • +
  • +
+

Inputs

+

Between 3 and 2147483647 inputs.

+
    +
  • R (heterogeneous)T1: The learning rate.

  • +
  • T (heterogeneous)T2: Update count of “X”. It should be a scalar.

  • +
  • inputs (variadic)T3: It sequentially contains the current values of optimized tensors, then their gradient tensors, and finally their momentum tensors. For example, if two tensors “X_1” and “X_2” are optimized, The expected input list would be [“X_1”, “X_2”, gradient of “X_1”, gradient of “X_2”, momentum of “X_1”, momentum of “X_2”].

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic)T3: It sequentially contains the new values of optimized tensors and then the new values of their momentum tensors. For example, if two tensors “X_1” and “X_2” are optimized, the output list would be [new value of “X_1”, new value of “X_2”, new momentum of “X_1”, new momentum of “X_2”].

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(double): Constrain input types to float scalars.

  • +
  • T2 tensor(int64): Constrain input types to 64-bit integer scalars.

  • +
  • T3 tensor(float), tensor(double): Constrain input types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMomentum_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMomentum_1(*args, **kwargs)#
+

Version

+

Onnx name: Momentum

+

This version of the operator has been available since +version 1 of domain ai.onnx.preview.training.

+

Summary

+

Compute one iteration of stochastic gradient update with momentum. This operator can conduct the optimization of multiple tensor variables.

+

Let’s define the behavior of this operator. As you can imagine, SG with momentum requires several parameters:

+
+
    +
  • The learning-rate “R”.

  • +
  • The update count “T”. That is, the number of conducted training iterations. It should be zero in the first training iteration.

  • +
  • A L2-norm regularization coefficient “norm_coefficient”.

  • +
  • A decay coefficient of previous accumulated gradient (i.e., momentum) “alpha”.

  • +
  • The scaling coefficient of current gradient “beta”.

  • +
  • An attribute “mode” that chooses whether standard momentum or Nesterov’s momentum should be used.

  • +
+
+

For the sake of simplicity, assume that there is only one tensor (called “X”) to be optimized. Other necessary inputs are the gradient of “X” (called “G”) and the momentum of “X” (called “V”). This Momentum operator maps all these inputs to the new value of “X” (called “X_new”) and its new momentum (called “V_new”).

+

This operator supports two different momentum algorithms. Set the attribute “mode” to “nesterov” if Nesterov’s momentum is desired. Otherwise, set the attribute “mode” to “standard” to use standard momentum. Computation details are described subsequently.

+

Let “+”, “-”, “*”, and “/” be element-wise operations with numpy-style broadcasting.

+

Pseudo code for SG with standard momentum:

+
+

// Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared
// values of all elements in X.
G_regularized = norm_coefficient * X + G

// In the first training iteration, beta should always be 1.
beta_adjusted = T > 0 ? beta : 1

// Compute the current momentum based on previous momentum and the current gradient.
V_new = alpha * V + beta_adjusted * G_regularized

// Update X.
X_new = X - R * V_new

+
+

Pseudo code for SG with Nesterov’s momentum:

+
+

// Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared
// values of all elements in X.
G_regularized = norm_coefficient * X + G;

// In the first training iteration, beta should always be 1.
beta_adjusted = T > 0 ? beta : 1

// Compute the current momentum based on previous momentum and the current gradient.
V_new = alpha * V + beta_adjusted * G_regularized;

// Compute final update direction and then update X.
X_new = X - R * (G_regularized + alpha * V_new)

+
+

If one assigns this operator to optimize multiple inputs, for example “X_1” and “X_2”, the same pseudo code is extended to handle all tensors jointly. More specifically, we can view “X” as a concatenation of “X_1” and “X_2” (of course, their gradients and accumulated gradients should be concatenated too) and then the pseudo code above becomes applicable.

+

Attributes

+
    +
  • +
  • +
  • +
  • +
+

Inputs

+

Between 3 and 2147483647 inputs.

+
    +
  • R (heterogeneous)T1: The learning rate.

  • +
  • T (heterogeneous)T2: Update count of “X”. It should be a scalar.

  • +
  • inputs (variadic)T3: It sequentially contains the current values of optimized tensors, then their gradient tensors, and finally their momentum tensors. For example, if two tensors “X_1” and “X_2” are optimized, The expected input list would be [“X_1”, “X_2”, gradient of “X_1”, gradient of “X_2”, momentum of “X_1”, momentum of “X_2”].

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic)T3: It sequentially contains the new values of optimized tensors and then the new values of their momentum tensors. For example, if two tensors “X_1” and “X_2” are optimized, the output list would be [new value of “X_1”, new value of “X_2”, new momentum of “X_1”, new momentum of “X_2”].

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(double): Constrain input types to float scalars.

  • +
  • T2 tensor(int64): Constrain input types to 64-bit integer scalars.

  • +
  • T3 tensor(float), tensor(double): Constrain input types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMul#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMul(*args, **kwargs)#
+

Version

+

Onnx name: Mul

+

This version of the operator has been available since +version 14.

+

Summary

+

Performs element-wise binary multiplication (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
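A minimal end-to-end sketch with this algebra class (assuming onnxruntime is available; the input name, constant and opset below are only illustrative):

import numpy as np
import onnxruntime as rt
from skl2onnx.algebra.onnx_ops import OnnxMul

X = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)

# Multiply the graph input 'X' by a constant initializer [2.0].
op = OnnxMul('X', np.array([2.0], dtype=np.float32),
             op_version=14, output_names=['Y'])
onx = op.to_onnx({'X': X})

sess = rt.InferenceSession(onx.SerializeToString(),
                           providers=['CPUExecutionProvider'])
print(sess.run(None, {'X': X})[0])    # [[2. 4.] [6. 8.]]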
+ +
+
+
+
+

OnnxMul_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMul_1(*args, **kwargs)#
+

Version

+

Onnx name: Mul

+

This version of the operator has been available since +version 1.

+

Summary

+

Performs element-wise binary multiplication (with limited broadcast support).

+

If necessary the right-hand-side argument will be broadcast to match the shape of the left-hand-side argument. When broadcasting is specified, the second tensor can either be of element size 1 (including a scalar tensor and any tensor with rank equal to or smaller than the first tensor), or have its shape as a contiguous subset of the first tensor’s shape. The start of the mutually equal shape is specified by the argument “axis”; if it is not set, suffix matching is assumed. 1-dim expansion doesn’t work yet.

+

For example, the following tensor shapes are supported (with broadcast=1):

+
+

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is a 1-element tensor
shape(A) = (2, 3, 4, 5), shape(B) = (5,)
shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

+
+

Attribute broadcast=1 needs to be passed to enable broadcasting.
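As a rough numpy emulation of this axis-based broadcast rule (not part of the specification): B is padded with singleton dimensions so that its shape lines up with A starting at axis, after which ordinary numpy broadcasting applies:

import numpy as np

A = np.arange(2 * 3 * 4 * 5, dtype=np.float32).reshape(2, 3, 4, 5)
B = np.ones((3, 4), dtype=np.float32)
axis = 1

# Align B with A starting at `axis`.
shape = [1] * A.ndim
shape[axis:axis + B.ndim] = B.shape
C = A * B.reshape(shape)
print(C.shape)    # (2, 3, 4, 5)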

+

Attributes

+
    +
  • +
  • broadcast: Pass 1 to enable broadcasting. Default value is name: "broadcast" i: 0 type: INT

  • +
  • +
+

Inputs

+
    +
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • +
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxMul_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMul_13(*args, **kwargs)#
+

Version

+

Onnx name: Mul

+

This version of the operator has been available since +version 13.

+

Summary

+

Performs element-wise binary multiplication (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMul_14#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMul_14(*args, **kwargs)#
+

Version

+

Onnx name: Mul

+

This version of the operator has been available since +version 14.

+

Summary

+

Performs element-wise binary multiplication (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMul_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMul_6(*args, **kwargs)#
+

Version

+

Onnx name: Mul

+

This version of the operator has been available since +version 6.

+

Summary

+

Performs element-wise binary multiplication (with limited broadcast support).

+

If necessary the right-hand-side argument will be broadcast to match the shape of the left-hand-side argument. When broadcasting is specified, the second tensor can either be of element size 1 (including a scalar tensor and any tensor with rank equal to or smaller than the first tensor), or have its shape as a contiguous subset of the first tensor’s shape. The start of the mutually equal shape is specified by the argument “axis”; if it is not set, suffix matching is assumed. 1-dim expansion doesn’t work yet.

+

For example, the following tensor shapes are supported (with broadcast=1):

+
+

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is a 1-element tensor
shape(A) = (2, 3, 4, 5), shape(B) = (5,)
shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

+
+

Attribute broadcast=1 needs to be passed to enable broadcasting.

+

Attributes

+
    +
  • +
  • broadcast: Pass 1 to enable broadcasting. Default value is name: "broadcast" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • +
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMul_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMul_7(*args, **kwargs)#
+

Version

+

Onnx name: Mul

+

This version of the operator has been available since +version 7.

+

Summary

+

Performs element-wise binary multiplication (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxMultinomial#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMultinomial(*args, **kwargs)#
+

Version

+

Onnx name: Multinomial

+

This version of the operator has been available since +version 7.

+

Summary

+

Generate a tensor of samples from a multinomial distribution according to the probabilities of each of the possible outcomes.

+

Attributes

+
    +
  • dtype: (Optional) The data type for the elements of the output tensor; if not specified, int32 will be used. Default value is name: "dtype" i: 6 type: INT

  • +
  • sample_size: Number of times to sample. Default value is name: "sample_size" i: 1 type: INT

  • +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T1: Input tensor with shape [batch_size, class_size], where class_size is the number of all possible outcomes. Each value along the axis zero represents the unnormalized log-probability of each corresponding outcome in a batch.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: Output tensor with shape [batch_size, sample_size], where sample_size is the number of times to sample. Each value along the axis zero represents the outcome of the corresponding sample in a batch.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • +
  • T2 tensor(int32), tensor(int64): Constrain output types to integral tensors.

  • +
+
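Conceptually the sampling can be described with numpy as below; this is only a sketch of the semantics, not the runtime's actual sampler, and the random draws will of course differ:

import numpy as np

rng = np.random.default_rng(0)
logits = np.array([[0.0, 1.0, 2.0]], dtype=np.float32)   # [batch_size, class_size]
sample_size = 5

# Turn the unnormalized log-probabilities into probabilities (softmax),
# then draw `sample_size` class indices for every batch row.
probs = np.exp(logits.astype(np.float64) - logits.max(axis=1, keepdims=True))
probs /= probs.sum(axis=1, keepdims=True)
samples = np.stack([rng.choice(probs.shape[1], size=sample_size, p=p)
                    for p in probs])
print(samples.shape)    # (1, 5) -> [batch_size, sample_size]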
+ +
+
+
+
+

OnnxMultinomial_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxMultinomial_7(*args, **kwargs)#
+

Version

+

Onnx name: Multinomial

+

This version of the operator has been available since +version 7.

+

Summary

+

Generate a tensor of samples from a multinomial distribution according to the probabilities of each of the possible outcomes.

+

Attributes

+
    +
  • dtype: (Optional) The data type for the elements of the output tensor; if not specified, int32 will be used. Default value is name: "dtype" i: 6 type: INT

  • +
  • sample_size: Number of times to sample. Default value is name: "sample_size" i: 1 type: INT

  • +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T1: Input tensor with shape [batch_size, class_size], where class_size is the number of all possible outcomes. Each value along the axis zero represents the unnormalized log-probability of each corresponding outcome in a batch.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T2: Output tensor with shape [batch_size, sample_size], where sample_size is the number of times to sample. Each value along the axis zero represents the outcome of the corresponding sample in a batch.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • +
  • T2 tensor(int32), tensor(int64): Constrain output types to integral tensors.

  • +
+
+ +
+
+
+
+

OnnxNeg#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNeg(*args, **kwargs)#
+

Version

+

Onnx name: Neg

+

This version of the operator has been available since +version 13.

+

Summary

+

Neg takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the sign of each element is flipped, i.e. y = -x is applied to the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(int32), tensor(int8), tensor(int16), tensor(int64), tensor(float16), tensor(double), tensor(bfloat16): Constrain input and output types to signed numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxNeg_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNeg_1(*args, **kwargs)#
+

Version

+

Onnx name: Neg

+

This version of the operator has been available since +version 1.

+

Summary

+

Neg takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the sign of each element is flipped, i.e. y = -x is applied to the tensor elementwise.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxNeg_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNeg_13(*args, **kwargs)#
+

Version

+

Onnx name: Neg

+

This version of the operator has been available since +version 13.

+

Summary

+

Neg takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the sign of each element is flipped, i.e. y = -x is applied to the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(int32), tensor(int8), tensor(int16), tensor(int64), tensor(float16), tensor(double), tensor(bfloat16): Constrain input and output types to signed numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxNeg_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNeg_6(*args, **kwargs)#
+

Version

+

Onnx name: Neg

+

This version of the operator has been available since +version 6.

+

Summary

+

Neg takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the sign of each element is flipped, i.e. y = -x is applied to the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(int32), tensor(int8), tensor(int16), tensor(int64), tensor(float16), tensor(double): Constrain input and output types to signed numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxNegativeLogLikelihoodLoss#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNegativeLogLikelihoodLoss(*args, **kwargs)#
+

Version

+

Onnx name: NegativeLogLikelihoodLoss

+

This version of the operator has been available since +version 13.

+

Summary

+

A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss. Its “input” tensor has the shape of (N, C, d1, d2, …, dk) where k >= 0. The “input” tensor contains log-probabilities for input[n, :, d_1, d_2,…, d_k] being in a class of [0, C). The operator’s “target” input tensor has the shape of (N, d1, d2, …, dk). It encodes class labels (one of C classes) or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x … x dk samples. The loss value for input[n, :, d_1, d_2,…d_k] being classified as class c = target[n][d_1][d_2]…[d_k] is computed as:

+
loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k].
+
+
+

When an optional “weight” is provided, the sample loss is calculated as:

+
loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c].
+
+
+

loss is zero for the case when target-value equals ignore_index.

+
loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index
+
+
+

If the “reduction” attribute is set to “none”, the operator’s output will be the above loss with shape (N, d1, d2, …, dk). If the “reduction” attribute is set to “mean” (the default attribute value), the output loss is (weight) averaged:

+
mean(loss), if "weight" is not provided,
+
+
+

or if weight is provided,

+
sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples.
+
+
+

If “reduction” attribute is set to “sum”, the output is a scalar: sum(loss).

+

See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.

+

Example 1:

+
// negative log likelihood loss, "none" reduction
N, C, d1 = 2, 3, 2
input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
         [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
target = [[2, 1], [0, 2]]

loss = np.zeros((N, d1))
for n in range(N):
    for d_1 in range(d1):
        c = target[n][d_1]
        loss[n][d_1] = -input[n][c][d_1]

// print(loss)
// [[-3. -2.]
//  [-0. -2.]]
+
+
+

Example 2:

+
// weighted negative log likelihood loss, sum reduction
N, C, d1 = 2, 3, 2
input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
         [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
target = [[2, 1], [0, 2]]
weight = [0.2, 0.3, 0.1]
loss = np.zeros((N, d1))
for n in range(N):
    for d_1 in range(d1):
        c = target[n][d_1]
        loss[n][d_1] = -input[n][c][d_1] * weight[c]

loss = np.sum(loss)
// print(loss)
// -1.1
+
+
+

Example 3:

+
// weighted negative log likelihood loss, mean reduction
N, C, d1 = 2, 3, 2
input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
         [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
target = [[2, 1], [0, 2]]
weight = [0.2, 0.3, 0.1]
loss = np.zeros((N, d1))
weight_total = 0
for n in range(N):
    for d_1 in range(d1):
        c = target[n][d_1]
        loss[n][d_1] = -input[n][c][d_1] * weight[c]
        weight_total = weight_total + weight[c]

loss = np.sum(loss) / weight_total
// print(loss)
// -1.57
+
+
+

Attributes

+
    +
  • +
  • reduction: Type of reduction to apply to loss: none, sum, mean (default). ‘none’: the output is the loss for each sample. ‘sum’: the output will be summed. ‘mean’: the sum of the output will be divided by the sum of applied weights. Default value is name: "reduction" s: "mean" type: STRING

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • input (heterogeneous)T: Input tensor of shape (N, C) or (N, C, d1, d2, …, dk).

  • +
  • target (heterogeneous)Tind: Target tensor of shape (N) or (N, d1, d2, …, dk). Target element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the target values should either be in the range [0, C) or have the value ignore_index.

  • +
  • weight (optional, heterogeneous)T: Optional rescaling weight tensor. If given, it has to be a tensor of size C. Otherwise, it is treated as if having all ones.

  • +
+

Outputs

+
    +
  • loss (heterogeneous)T: The negative log likelihood loss

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input, weight, and output types to floating-point tensors.

  • +
  • Tind tensor(int32), tensor(int64): Constrain target to integer types

  • +
+
+ +
+
+
+
+

OnnxNegativeLogLikelihoodLoss_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNegativeLogLikelihoodLoss_12(*args, **kwargs)#
+

Version

+

Onnx name: NegativeLogLikelihoodLoss

+

This version of the operator has been available since +version 12.

+

Summary

+

A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss. Its “input” tensor has the shape of (N, C, d1, d2, …, dk) where k >= 0. The “input” tensor contains log-probabilities for input[n, :, d_1, d_2,…, d_k] being in a class of [0, C). The operator’s “target” input tensor has the shape of (N, d1, d2, …, dk). It encodes class labels (one of C classes) or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x … x dk samples. The loss value for input[n, :, d_1, d_2,…d_k] being classified as class c = target[n][d_1][d_2]…[d_k] is computed as:

loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k].

When an optional “weight” is provided, the sample loss is calculated as:

loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c].

loss is zero for the case when target-value equals ignore_index.

loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index

If the “reduction” attribute is set to “none”, the operator’s output will be the above loss with shape (N, d1, d2, …, dk). If the “reduction” attribute is set to “mean” (the default attribute value), the output loss is (weight) averaged:

mean(loss), if "weight" is not provided,

or if weight is provided,

sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples.

If the “reduction” attribute is set to “sum”, the output is a scalar: sum(loss).

See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.

Example 1:

// negative log likelihood loss, "none" reduction
N, C, d1 = 2, 3, 2
input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
         [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
target = [[2, 1], [0, 2]]

loss = np.zeros((N, d1))
for n in range(N):
    for d_1 in range(d1):
        c = target[n][d_1]
        loss[n][d_1] = -input[n][c][d_1]

// print(loss)
// [[-3. -2.]
//  [-0. -2.]]

Example 2:

// weighted negative log likelihood loss, sum reduction
N, C, d1 = 2, 3, 2
input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
         [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
target = [[2, 1], [0, 2]]
weight = [0.2, 0.3, 0.1]
loss = np.zeros((N, d1))
for n in range(N):
    for d_1 in range(d1):
        c = target[n][d_1]
        loss[n][d_1] = -input[n][c][d_1] * weight[c]

loss = np.sum(loss)
// print(loss)
// -1.1

Example 3:

// weighted negative log likelihood loss, mean reduction
N, C, d1 = 2, 3, 2
input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
         [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
target = [[2, 1], [0, 2]]
weight = [0.2, 0.3, 0.1]
loss = np.zeros((N, d1))
weight_total = 0
for n in range(N):
    for d_1 in range(d1):
        c = target[n][d_1]
        loss[n][d_1] = -input[n][c][d_1] * weight[c]
        weight_total = weight_total + weight[c]

loss = np.sum(loss) / weight_total
// print(loss)
// -1.57

+
+
+

Attributes

+
    +
  • +
  • reduction: Type of reduction to apply to loss: none, sum, mean (default). ‘none’: the output is the loss for each sample. ‘sum’: the output will be summed. ‘mean’: the sum of the output will be divided by the sum of applied weights. Default value is name: "reduction" s: "mean" type: STRING

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • input (heterogeneous)T: Input tensor of shape (N, C) or (N, C, d1, d2, …, dk).

  • +
  • target (heterogeneous)Tind: Target tensor of shape (N) or (N, d1, d2, …, dk). Target element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the target values should either be in the range [0, C) or have the value ignore_index.

  • +
  • weight (optional, heterogeneous)T: Optional rescaling weight tensor. If given, it has to be a tensor of size C. Otherwise, it is treated as if having all ones.

  • +
+

Outputs

+
    +
  • loss (heterogeneous)T: The negative log likelihood loss

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input, weight, and output types to floating-point tensors.

  • +
  • Tind tensor(int32), tensor(int64): Constrain target to integer types

  • +
+
+ +
+
+
+
+

OnnxNegativeLogLikelihoodLoss_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNegativeLogLikelihoodLoss_13(*args, **kwargs)#
+

Version

+

Onnx name: NegativeLogLikelihoodLoss

+

This version of the operator has been available since +version 13.

+

Summary

+

A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss. Its “input” tensor has the shape of (N, C, d1, d2, …, dk) where k >= 0. The “input” tensor contains log-probabilities for input[n, :, d_1, d_2,…, d_k] being in a class of [0, C). The operator’s “target” input tensor has the shape of (N, d1, d2, …, dk). It encodes class labels (one of C classes) or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x … x dk samples. The loss value for input[n, :, d_1, d_2,…d_k] being classified as class c = target[n][d_1][d_2]…[d_k] is computed as:

+
loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k].
+
+
+

When an optional “weight” is provided, the sample loss is calculated as:

+
loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c].
+
+
+

loss is zero for the case when target-value equals ignore_index.

+
loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index
+
+
+

If the “reduction” attribute is set to “none”, the operator’s output will be the above loss with shape (N, d1, d2, …, dk). If the “reduction” attribute is set to “mean” (the default attribute value), the output loss is (weight) averaged:

+
mean(loss), if "weight" is not provided,
+
+
+

or if weight is provided,

+
sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples.
+
+
+

If “reduction” attribute is set to “sum”, the output is a scalar: sum(loss).

+

See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.

+

Example 1:

+
// negative log likelihood loss, "none" reduction
N, C, d1 = 2, 3, 2
input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
         [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
target = [[2, 1], [0, 2]]

loss = np.zeros((N, d1))
for n in range(N):
    for d_1 in range(d1):
        c = target[n][d_1]
        loss[n][d_1] = -input[n][c][d_1]

// print(loss)
// [[-3. -2.]
//  [-0. -2.]]
+
+
+

Example 2:

+
// weighted negative log likelihood loss, sum reduction
N, C, d1 = 2, 3, 2
input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
         [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
target = [[2, 1], [0, 2]]
weight = [0.2, 0.3, 0.1]
loss = np.zeros((N, d1))
for n in range(N):
    for d_1 in range(d1):
        c = target[n][d_1]
        loss[n][d_1] = -input[n][c][d_1] * weight[c]

loss = np.sum(loss)
// print(loss)
// -1.1
+
+
+

Example 3:

+
// weighted negative log likelihood loss, mean reduction
N, C, d1 = 2, 3, 2
input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
         [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]
target = [[2, 1], [0, 2]]
weight = [0.2, 0.3, 0.1]
loss = np.zeros((N, d1))
weight_total = 0
for n in range(N):
    for d_1 in range(d1):
        c = target[n][d_1]
        loss[n][d_1] = -input[n][c][d_1] * weight[c]
        weight_total = weight_total + weight[c]

loss = np.sum(loss) / weight_total
// print(loss)
// -1.57
+
+
+

Attributes

+
    +
  • +
  • reduction: Type of reduction to apply to loss: none, sum, mean (default). ‘none’: the output is the loss for each sample. ‘sum’: the output will be summed. ‘mean’: the sum of the output will be divided by the sum of applied weights. Default value is name: "reduction" s: "mean" type: STRING

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • input (heterogeneous)T: Input tensor of shape (N, C) or (N, C, d1, d2, …, dk).

  • +
  • target (heterogeneous)Tind: Target tensor of shape (N) or (N, d1, d2, …, dk). Target element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the target values should either be in the range [0, C) or have the value ignore_index.

  • +
  • weight (optional, heterogeneous)T: Optional rescaling weight tensor. If given, it has to be a tensor of size C. Otherwise, it is treated as if having all ones.

  • +
+

Outputs

+
    +
  • loss (heterogeneous)T: The negative log likelihood loss

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input, weight, and output types to floating-point tensors.

  • +
  • Tind tensor(int32), tensor(int64): Constrain target to integer types

  • +
+
+ +
+
+
+
+

OnnxNonMaxSuppression#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNonMaxSuppression(*args, **kwargs)#
+

Version

+

Onnx name: NonMaxSuppression

+

This version of the operator has been available since +version 11.

+

Summary

+

Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes with score less than score_threshold are removed. The bounding box format is indicated by the attribute center_point_box. Note that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflecting the coordinate system results in the same boxes being selected by the algorithm. The selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation.

+

Attributes

+
    +
  • center_point_box: Integer indicating the format of the box data. The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models. 1 - the box data is supplied as [x_center, y_center, width, height]. Mostly used for Pytorch models. Default value is name: "center_point_box" i: 0 type: INT

  • +
+

Inputs

+

Between 2 and 5 inputs.

+
    +
  • boxes (heterogeneous)tensor(float): An input tensor with shape [num_batches, spatial_dimension, 4]. The single box data format is indicated by center_point_box.

  • +
  • scores (heterogeneous)tensor(float): An input tensor with shape [num_batches, num_classes, spatial_dimension]

  • +
  • max_output_boxes_per_class (optional, heterogeneous)tensor(int64): Integer representing the maximum number of boxes to be selected per batch per class. It is a scalar. Default to 0, which means no output.

  • +
  • iou_threshold (optional, heterogeneous)tensor(float): Float representing the threshold for deciding whether boxes overlap too much with respect to IOU. It is scalar. Value range [0, 1]. Default to 0.

  • +
  • score_threshold (optional, heterogeneous)tensor(float): Float representing the threshold for deciding when to remove boxes based on score. It is a scalar.

  • +
+

Outputs

+
    +
  • selected_indices (heterogeneous)tensor(int64): selected indices from the boxes tensor. [num_selected_indices, 3], the selected index format is [batch_index, class_index, box_index].

  • +
+
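The selection criterion is based on intersection over union; a minimal numpy sketch of the IOU of two boxes in the default [y1, x1, y2, x2] corner format (center_point_box=0), for illustration only:

import numpy as np

def iou(box_a, box_b):
    # Boxes in [y1, x1, y2, x2] corner format.
    y1 = max(box_a[0], box_b[0])
    x1 = max(box_a[1], box_b[1])
    y2 = min(box_a[2], box_b[2])
    x2 = min(box_a[3], box_b[3])
    inter = max(0.0, y2 - y1) * max(0.0, x2 - x1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter)

print(iou(np.array([0.0, 0.0, 2.0, 2.0]),
          np.array([1.0, 1.0, 3.0, 3.0])))    # 1/7 ~ 0.143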
+ +
+
+
+
+

OnnxNonMaxSuppression_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNonMaxSuppression_10(*args, **kwargs)#
+

Version

+

Onnx name: NonMaxSuppression

+

This version of the operator has been available since +version 10.

+

Summary

+

Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes with score less than score_threshold are removed. The bounding box format is indicated by the attribute center_point_box. Note that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflecting the coordinate system results in the same boxes being selected by the algorithm. The selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation.

+

Attributes

+
    +
  • center_point_box: Integer indicating the format of the box data. The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models. 1 - the box data is supplied as [x_center, y_center, width, height]. Mostly used for Pytorch models. Default value is name: "center_point_box" i: 0 type: INT

  • +
+

Inputs

+

Between 2 and 5 inputs.

+
    +
  • boxes (heterogeneous)tensor(float): An input tensor with shape [num_batches, spatial_dimension, 4]. The single box data format is indicated by center_point_box.

  • +
  • scores (heterogeneous)tensor(float): An input tensor with shape [num_batches, num_classes, spatial_dimension]

  • +
  • max_output_boxes_per_class (optional, heterogeneous)tensor(int64): Integer representing the maximum number of boxes to be selected per batch per class. It is a scalar. Default to 0, which means no output.

  • +
  • iou_threshold (optional, heterogeneous)tensor(float): Float representing the threshold for deciding whether boxes overlap too much with respect to IOU. It is scalar. Value range [0, 1]. Default to 0.

  • +
  • score_threshold (optional, heterogeneous)tensor(float): Float representing the threshold for deciding when to remove boxes based on score. It is a scalar.

  • +
+

Outputs

+
    +
  • selected_indices (heterogeneous)tensor(int64): selected indices from the boxes tensor. [num_selected_indices, 3], the selected index format is [batch_index, class_index, box_index].

  • +
+
+ +
+
+
+
+

OnnxNonMaxSuppression_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNonMaxSuppression_11(*args, **kwargs)#
+

Version

+

Onnx name: NonMaxSuppression

+

This version of the operator has been available since +version 11.

+

Summary

+

Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes with score less than score_threshold are removed. The bounding box format is indicated by the attribute center_point_box. Note that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflecting the coordinate system results in the same boxes being selected by the algorithm. The selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation.

+

Attributes

+
    +
  • center_point_box: Integer indicating the format of the box data. The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models. 1 - the box data is supplied as [x_center, y_center, width, height]. Mostly used for Pytorch models. Default value is name: "center_point_box" i: 0 type: INT

  • +
+

Inputs

+

Between 2 and 5 inputs.

+
    +
  • boxes (heterogeneous)tensor(float): An input tensor with shape [num_batches, spatial_dimension, 4]. The single box data format is indicated by center_point_box.

  • +
  • scores (heterogeneous)tensor(float): An input tensor with shape [num_batches, num_classes, spatial_dimension]

  • +
  • max_output_boxes_per_class (optional, heterogeneous)tensor(int64): Integer representing the maximum number of boxes to be selected per batch per class. It is a scalar. Default to 0, which means no output.

  • +
  • iou_threshold (optional, heterogeneous)tensor(float): Float representing the threshold for deciding whether boxes overlap too much with respect to IOU. It is scalar. Value range [0, 1]. Default to 0.

  • +
  • score_threshold (optional, heterogeneous)tensor(float): Float representing the threshold for deciding when to remove boxes based on score. It is a scalar.

  • +
+

Outputs

+
    +
  • selected_indices (heterogeneous)tensor(int64): selected indices from the boxes tensor. [num_selected_indices, 3], the selected index format is [batch_index, class_index, box_index].

  • +
+
+ +
+
+
+
+

OnnxNonZero#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNonZero(*args, **kwargs)#
+

Version

+

Onnx name: NonZero

+

This version of the operator has been available since +version 13.

+

Summary

+

Returns the indices of the elements that are non-zero (in row-major order - by dimension). NonZero behaves similarly to numpy.nonzero: https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html, but for scalar input, NonZero produces output shape (0, N) instead of (1, N), which is different from Numpy’s behavior.

+

Inputs

+
    +
  • X (heterogeneous)T: input

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(int64): output

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to all tensor types.

  • +
+
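The layout difference with numpy can be seen in a short sketch:

import numpy as np

x = np.array([[1, 0],
              [0, 3]])

# numpy returns one index array per dimension...
print(np.nonzero(x))             # (array([0, 1]), array([0, 1]))

# ...whereas ONNX NonZero packs them into a single (rank, count) int64 tensor.
print(np.vstack(np.nonzero(x)))  # [[0 1]
                                 #  [0 1]]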
+ +
+
+
+
+

OnnxNonZero_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNonZero_13(*args, **kwargs)#
+

Version

+

Onnx name: NonZero

+

This version of the operator has been available since +version 13.

+

Summary

+

Returns the indices of the elements that are non-zero (in row-major order - by dimension). NonZero behaves similarly to numpy.nonzero: https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html, but for scalar input, NonZero produces output shape (0, N) instead of (1, N), which is different from Numpy’s behavior.

+

Inputs

+
    +
  • X (heterogeneous)T: input

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(int64): output

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxNonZero_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNonZero_9(*args, **kwargs)#
+

Version

+

Onnx name: NonZero

+

This version of the operator has been available since +version 9.

+

Summary

+

Returns the indices of the elements that are non-zero (in row-major order - by dimension). NonZero behaves similarly to numpy.nonzero: https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html, but for scalar input, NonZero produces output shape (0, N) instead of (1, N), which is different from Numpy’s behavior.

+

Inputs

+
    +
  • X (heterogeneous)T: input

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(int64): output

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxNormalizer#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNormalizer(*args, **kwargs)#
+

Version

+

Onnx name: Normalizer

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+
+

Normalize the input. There are three normalization modes, which have the corresponding formulas, defined using element-wise infix operators ‘/’ and ‘^’ and tensor-wide functions ‘max’ and ‘sum’:

+

Max: Y = X / max(X)

+

L1: Y = X / sum(X)

+

L2: Y = sqrt(X^2 / sum(X^2))

+

In all modes, if the divisor is zero, Y == X.

+

For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row of the batch is normalized independently.
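A rough numpy reading of the three modes for an [N, C] batch (a sketch only; it assumes, as in scikit-learn's Normalizer, that 'MAX' and 'L1' operate on absolute values, which the formulas above leave implicit):

import numpy as np

def normalize(X, norm="MAX"):
    # Row-wise normalization; rows whose divisor is zero are returned
    # unchanged (Y == X), as stated above.
    if norm == "MAX":
        div = np.abs(X).max(axis=1, keepdims=True)
    elif norm == "L1":
        div = np.abs(X).sum(axis=1, keepdims=True)
    else:  # "L2"
        div = np.sqrt((X ** 2).sum(axis=1, keepdims=True))
    safe = np.where(div == 0, 1, div)
    return np.where(div == 0, X, X / safe)

X = np.array([[1.0, -2.0, 2.0],
              [0.0,  0.0, 0.0]], dtype=np.float32)
print(normalize(X, "L2"))    # second row stays all zeros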

+
+

Attributes

+
    +
  • norm: One of ‘MAX’, ‘L1’, ‘L2’. Default value is name: "norm" s: "MAX" type: STRING

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Data to be encoded, a tensor of shape [N,C] or [C]

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(float): Encoded output data

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type.

  • +
+
+ +
+
+
+
+

OnnxNormalizer_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNormalizer_1(*args, **kwargs)#
+

Version

+

Onnx name: Normalizer

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+
+

Normalize the input. There are three normalization modes, which have the corresponding formulas, defined using element-wise infix operators ‘/’ and ‘^’ and tensor-wide functions ‘max’ and ‘sum’:

+

Max: Y = X / max(X)

+

L1: Y = X / sum(X)

+

L2: Y = sqrt(X^2 / sum(X^2))

+

In all modes, if the divisor is zero, Y == X.

+

For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row of the batch is normalized independently.

+
+

Attributes

+
    +
  • norm: One of ‘MAX’, ‘L1’, ‘L2’. Default value is name: "norm" s: "MAX" type: STRING

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Data to be encoded, a tensor of shape [N,C] or [C]

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(float): Encoded output data

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type.

  • +
+
+ +
+
+
+
+

OnnxNot#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNot(*args, **kwargs)#
+

Version

+

Onnx name: Not

+

This version of the operator has been available since +version 1.

+

Summary

+

Returns the negation of the input tensor element-wise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(bool): Constrain input/output to boolean tensors.

  • +
+
+ +
+
+
+
+

OnnxNot_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxNot_1(*args, **kwargs)#
+

Version

+

Onnx name: Not

+

This version of the operator has been available since +version 1.

+

Summary

+

Returns the negation of the input tensor element-wise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(bool): Constrain input/output to boolean tensors.

  • +
+
+ +
+
+
+
+

OnnxOneHot#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOneHot(*args, **kwargs)#
+

Version

+

Onnx name: OneHot

+

This version of the operator has been available since +version 11.

+

Summary

+

Produces a one-hot tensor based on inputs. The locations represented by the index values in the ‘indices’ input tensor will have ‘on_value’ and the other locations will have ‘off_value’ in the output tensor, where ‘on_value’ and ‘off_value’ are specified as part of the required input argument ‘values’, which is a two-element tensor of format [off_value, on_value]. The rank of the output tensor will be one greater than the rank of the input tensor. The additional dimension is for the one-hot representation. The additional dimension will be inserted at the position specified by ‘axis’. If ‘axis’ is not specified then the additional dimension will be inserted as the innermost dimension, i.e. axis=-1. The size of the additional dimension is specified by the required scalar input ‘depth’. The type of the output tensor is the same as the type of the ‘values’ input. Any entries in the ‘indices’ input tensor with values outside the range [-depth, depth-1] will result in one-hot representation with all ‘off_value’ values in the output tensor.

+

when axis = 0: output[input[i, j, k], i, j, k] = 1 for all i, j, k and 0 otherwise.

+

when axis = -1: output[i, j, k, input[i, j, k]] = 1 for all i, j, k and 0 otherwise.
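For the default axis=-1 and non-negative in-range indices, the construction can be sketched in numpy as follows (any index with no match simply yields an all-‘off_value’ vector, as described above):

import numpy as np

indices = np.array([[1, 3], [0, 2]], dtype=np.int64)
depth = 4
values = np.array([0.0, 1.0], dtype=np.float32)   # [off_value, on_value]

# One-hot along the innermost axis (axis=-1).
output = np.where(np.arange(depth) == indices[..., None], values[1], values[0])
print(output.shape)    # (2, 2, 4) == indices.shape + (depth,)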

+

Attributes

+
    +
  • axis: (Optional) Axis along which the one-hot representation is added. Default: axis=-1. axis=-1 means that the additional dimension will be inserted as the innermost/last dimension in the output tensor. Negative value means counting dimensions from the back. Accepted range is [-r-1, r] where r = rank(indices). Default value is name: "axis" i: -1 type: INT

  • +
+

Inputs

+
    +
  • indices (heterogeneous)T1: Input tensor containing indices. Any entries in the ‘indices’ input tensor with values outside the range [-depth, depth-1] will result in one-hot representation with all ‘off_value’ values in the output tensor.In case ‘indices’ is of non-integer type, the values will be casted to int64 before use.

  • +
  • depth (heterogeneous)T2: Scalar specifying the number of classes in one-hot tensor. This is also the size of the one-hot dimension (specified by ‘axis’ attribute) added on in the output tensor. The values in the ‘indices’ input tensor are expected to be in the range [-depth, depth-1]. In case ‘depth’ is of non-integer type, it will be casted to int64 before use.

  • +
  • values (heterogeneous)T3: Rank 1 tensor containing exactly two elements, in the format [off_value, on_value], where ‘on_value’ is the value used for filling locations specified in ‘indices’ input tensor, and ‘off_value’ is the value used for filling locations other than those specified in ‘indices’ input tensor.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T3: Tensor of rank one greater than input tensor ‘indices’, i.e. rank(output) = rank(indices) + 1. The data type for the elements of the output tensor is the same as the type of input ‘values’ is used.

  • +
+

Type Constraints

+
    +
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input to only numeric types.

  • +
  • T2 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input to only numeric types.

  • +
  • T3 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxOneHotEncoder#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOneHotEncoder(*args, **kwargs)#
+

Version

+

Onnx name: OneHotEncoder

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Replace each input element with an array of ones and zeros, where a single one is placed at the index of the category that was passed in. The total category count will determine the size of the extra dimension of the output array Y.

+

For example, if we pass a tensor with a single value of 4, and a category count of 8, the output will be a tensor with [0,0,0,0,1,0,0,0].

+

This operator assumes every input feature is from the same set of categories.

+

If the input is a tensor of float, int32, or double, the data will be cast to integers and the cats_int64s category list will be used for the lookups.
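A small numpy sketch of the lookup just described (the cats_int64s list below is purely hypothetical; the real list is stored as an attribute of the node):

import numpy as np

cats_int64s = [1, 4, 7]                 # hypothetical category list
X = np.array([4, 7, 2], dtype=np.int64)

# Each element becomes a one-hot row over the known categories; with
# zeros=1 (the default) an unknown value such as 2 yields an all-zero row.
Y = np.array([[1.0 if x == c else 0.0 for c in cats_int64s] for x in X],
             dtype=np.float32)
print(Y)
# [[0. 1. 0.]
#  [0. 0. 1.]
#  [0. 0. 0.]]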

+

Attributes

+
    +
  • +
  • +
  • zeros: If true and a category is not present, will return all zeros; if false and a category is not found, the operator will fail. Default value is name: "zeros" i: 1 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Data to be encoded.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(float): Encoded output data, having one more dimension than X.

  • +
+

Type Constraints

+
    +
  • T tensor(string), tensor(int64), tensor(int32), tensor(float), tensor(double): The input must be a tensor of a numeric type.

  • +
+
+ +
+
+
+
+

OnnxOneHotEncoder_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOneHotEncoder_1(*args, **kwargs)#
+

Version

+

Onnx name: OneHotEncoder

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Replace each input element with an array of ones and zeros, where a single one is placed at the index of the category that was passed in. The total category count will determine the size of the extra dimension of the output array Y.

+

For example, if we pass a tensor with a single value of 4, and a category count of 8, the output will be a tensor with [0,0,0,0,1,0,0,0].

+

This operator assumes every input feature is from the same set of categories.

+

If the input is a tensor of float, int32, or double, the data will be cast +to integers and the cats_int64s category list will be used for the lookups.

+

Attributes

+
    +
  • +
  • +
  • zeros: If true and a category is not present, the operator will return all zeros; if false and a category is not found, the operator will fail. Default value is +name: "zeros" i: 1 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Data to be encoded.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(float): Encoded output data, having one more dimension than X.

  • +
+

Type Constraints

+
    +
  • T tensor(string), tensor(int64), tensor(int32), tensor(float), tensor(double): The input must be a tensor of a numeric type.

  • +
+
+ +
+
+
+
+

OnnxOneHot_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOneHot_11(*args, **kwargs)#
+

Version

+

Onnx name: OneHot

+

This version of the operator has been available since +version 11.

+

Summary

+

Produces a one-hot tensor based on inputs. +The locations represented by the index values in the ‘indices’ input tensor will have ‘on_value’ +and the other locations will have ‘off_value’ in the output tensor, where ‘on_value’ and ‘off_value’ +are specified as part of required input argument ‘values’, which is a two-element tensor of format +[off_value, on_value]. The rank of the output tensor will be one greater than the rank of the +input tensor. The additional dimension is for one-hot representation. The additional dimension will +be inserted at the position specified by ‘axis’. If ‘axis’ is not specified then the additional +dimension will be inserted as the innermost dimension, i.e. axis=-1. The size of the additional +dimension is specified by required scalar input ‘depth’. The type of the output tensor is the same +as the type of the ‘values’ input. Any entries in the ‘indices’ input tensor with values outside +the range [-depth, depth-1] will result in one-hot representation with all ‘off_value’ values in the +output tensor.

+

when axis = 0: +output[input[i, j, k], i, j, k] = 1 for all i, j, k and 0 otherwise.

+

when axis = -1: +output[i, j, k, input[i, j, k]] = 1 for all i, j, k and 0 otherwise.

+

Attributes

+
    +
  • axis: (Optional) Axis along which one-hot representation is added. Default: axis=-1. axis=-1 means that the additional dimension will be inserted as the innermost/last dimension in the output tensor. Negative value means counting dimensions from the back. Accepted range is [-r-1, r] where r = rank(indices). Default value is +name: "axis" i: -1 type: INT

  • +
+

Inputs

+
    +
  • indices (heterogeneous)T1: Input tensor containing indices. Any entries in the ‘indices’ input tensor with values outside the range [-depth, depth-1] will result in one-hot representation with all ‘off_value’ values in the output tensor. In case ‘indices’ is of non-integer type, the values will be cast to int64 before use.

  • +
  • depth (heterogeneous)T2: Scalar specifying the number of classes in one-hot tensor. This is also the size of the one-hot dimension (specified by ‘axis’ attribute) added on in the output tensor. The values in the ‘indices’ input tensor are expected to be in the range [-depth, depth-1]. In case ‘depth’ is of non-integer type, it will be cast to int64 before use.

  • +
  • values (heterogeneous)T3: Rank 1 tensor containing exactly two elements, in the format [off_value, on_value], where ‘on_value’ is the value used for filling locations specified in ‘indices’ input tensor, and ‘off_value’ is the value used for filling locations other than those specified in ‘indices’ input tensor.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T3: Tensor of rank one greater than input tensor ‘indices’, i.e. rank(output) = rank(indices) + 1. The data type of the output tensor elements is the same as the type of the ‘values’ input.

  • +
+

Type Constraints

+
    +
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input to only numeric types.

  • +
  • T2 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input to only numeric types.

  • +
  • T3 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • +
+
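A minimal sketch of the behaviour described above, using the algebra class directly; the input name 'indices' is an assumption, and depth and values are passed as constants rather than graph inputs.

import numpy as np
from onnxruntime import InferenceSession
from skl2onnx.algebra.onnx_ops import OnnxOneHot_11

indices = np.array([1, 3], dtype=np.int64)

# depth (size of the one-hot axis) and values ([off_value, on_value]) as constants.
op = OnnxOneHot_11(
    'indices',
    np.array(4, dtype=np.int64),
    np.array([0.0, 1.0], dtype=np.float32),
    axis=-1,
    output_names=['Y'],
)
onx = op.to_onnx({'indices': indices})

sess = InferenceSession(onx.SerializeToString())
print(sess.run(None, {'indices': indices})[0])
# expected: [[0. 1. 0. 0.]
#            [0. 0. 0. 1.]]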
+ +
+
+
+
+

OnnxOneHot_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOneHot_9(*args, **kwargs)#
+

Version

+

Onnx name: OneHot

+

This version of the operator has been available since +version 9.

+

Summary

+

Produces a one-hot tensor based on inputs. +The locations represented by the index values in the ‘indices’ input tensor will have ‘on_value’ +and the other locations will have ‘off_value’ in the output tensor, where ‘on_value’ and ‘off_value’ +are specified as part of required input argument ‘values’, which is a two-element tensor of format +[off_value, on_value]. The rank of the output tensor will be one greater than the rank of the +input tensor. The additional dimension is for one-hot representation. The additional dimension will +be inserted at the position specified by ‘axis’. If ‘axis’ is not specified then the additional +dimension will be inserted as the innermost dimension, i.e. axis=-1. The size of the additional +dimension is specified by required scalar input ‘depth’. The type of the output tensor is the same +as the type of the ‘values’ input. Any entries in the ‘indices’ input tensor with values outside +the range [0, depth) will result in one-hot representation with all ‘off_value’ values in the +output tensor.

+

Attributes

+
    +
  • axis: (Optional) Axis along which one-hot representation is added. Default: axis=-1. axis=-1 means that the additional dimension will be inserted as the innermost/last dimension in the output tensor. Default value is +name: "axis" i: -1 type: INT

  • +
+

Inputs

+
    +
  • indices (heterogeneous)T1: Input tensor containing indices. The values must be non-negative integers. Any entries in the ‘indices’ input tensor with values outside the range [0, depth) will result in one-hot representation with all ‘off_value’ values in the output tensor. In case ‘indices’ is of non-integer type, the values will be cast to int64 before use.

  • +
  • depth (heterogeneous)T2: Scalar specifying the number of classes in one-hot tensor. This is also the size of the one-hot dimension (specified by ‘axis’ attribute) added on in the output tensor. The values in the ‘indices’ input tensor are expected to be in the range [0, depth). In case ‘depth’ is of non-integer type, it will be cast to int64 before use.

  • +
  • values (heterogeneous)T3: Rank 1 tensor containing exactly two elements, in the format [off_value, on_value], where ‘on_value’ is the value used for filling locations specified in ‘indices’ input tensor, and ‘off_value’ is the value used for filling locations other than those specified in ‘indices’ input tensor.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T3: Tensor of rank one greater than input tensor ‘indices’, i.e. rank(output) = rank(indices) + 1. The data type of the output tensor elements is the same as the type of the ‘values’ input.

  • +
+

Type Constraints

+
    +
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input to only numeric types.

  • +
  • T2 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input to only numeric types.

  • +
  • T3 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxOptional#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOptional(*args, **kwargs)#
+

Version

+

Onnx name: Optional

+

This version of the operator has been available since +version 15.

+

Summary

+

Constructs an optional-type value containing either an empty optional of a certain type specified by the attribute, +or a non-empty value containing the input element.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 0 and 1 inputs.

+
    +
  • input (optional, heterogeneous)V: The input element.

  • +
+

Outputs

+
    +
  • output (heterogeneous)O: The optional output enclosing the input element.

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input type to all tensor and sequence types.

  • +
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrain output type to all optional tensor or optional sequence types.

  • +
+
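A small sketch, under the assumption that the target opset is at least 15 and that the runtime supports optional types: wrap a tensor input into a non-empty optional and check it with OptionalHasElement (documented below). The input name 'X' is illustrative.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxOptional, OnnxOptionalHasElement

# Wrap the tensor input 'X' into a non-empty optional value,
# then test that the optional actually holds an element.
opt = OnnxOptional('X', op_version=15)
has = OnnxOptionalHasElement(opt, op_version=15, output_names=['has_element'])
onx = has.to_onnx({'X': np.array([1.0, 2.0], dtype=np.float32)})
# 'has_element' evaluates to a scalar boolean tensor set to True.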
+ +
+
+
+
+

OnnxOptionalGetElement#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOptionalGetElement(*args, **kwargs)#
+

Version

+

Onnx name: OptionalGetElement

+

This version of the operator has been available since +version 18.

+

Summary

+

If the input is a tensor or sequence type, it returns the input. +If the input is an optional type, it outputs the element contained in the input. +It is an error if the input is an empty optional-type (i.e. it does not contain an element); the behavior is undefined in this case.

+

Inputs

+
    +
  • input (heterogeneous)O: The optional input.

  • +
+

Outputs

+
    +
  • output (heterogeneous)V: Output element in the optional input.

  • +
+

Type Constraints

+
    +
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input type to optional tensor and optional sequence types.

  • +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output type to all tensor or sequence types.

  • +
+
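A companion sketch to the one shown for OnnxOptional above: wrap a tensor into an optional and read it back, again assuming an opset of at least 18 and runtime support for optional types; the input name 'X' is illustrative.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxOptional, OnnxOptionalGetElement

# Round trip: Optional(X) -> OptionalGetElement returns the original tensor.
opt = OnnxOptional('X', op_version=18)
get = OnnxOptionalGetElement(opt, op_version=18, output_names=['Y'])
onx = get.to_onnx({'X': np.array([[1.0, 2.0]], dtype=np.float32)})
# 'Y' carries the same values as the input 'X'.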
+ +
+
+
+
+

OnnxOptionalGetElement_15#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOptionalGetElement_15(*args, **kwargs)#
+

Version

+

Onnx name: OptionalGetElement

+

This version of the operator has been available since +version 15.

+

Summary

+

Outputs the element contained in the optional-type input. It is an error if the input value does not contain an element; the behavior is undefined in this case.

+

Inputs

+
    +
  • input (heterogeneous)O: The optional input.

  • +
+

Outputs

+
    +
  • output (heterogeneous)V: Output element in the optional input.

  • +
+

Type Constraints

+
    +
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrain input type to optional tensor and optional sequence types.

  • +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output type to all tensor or sequence types.

  • +
+
+ +
+
+
+
+

OnnxOptionalGetElement_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOptionalGetElement_18(*args, **kwargs)#
+

Version

+

Onnx name: OptionalGetElement

+

This version of the operator has been available since +version 18.

+

Summary

+

If the input is a tensor or sequence type, it returns the input. +If the input is an optional type, it outputs the element contained in the input. +It is an error if the input is an empty optional-type (i.e. it does not contain an element); the behavior is undefined in this case.

+

Inputs

+
    +
  • input (heterogeneous)O: The optional input.

  • +
+

Outputs

+
    +
  • output (heterogeneous)V: Output element in the optional input.

  • +
+

Type Constraints

+
    +
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input type to optional tensor and optional sequence types.

  • +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output type to all tensor or sequence types.

  • +
+
+ +
+
+
+
+

OnnxOptionalHasElement#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOptionalHasElement(*args, **kwargs)#
+

Version

+

Onnx name: OptionalHasElement

+

This version of the operator has been available since +version 18.

+

Summary

+

Returns true if (1) the input is an optional-type and contains an element, +or, (2) the input is a tensor or sequence type. +If the input is not provided or is an empty optional-type, this op returns false.

+

Inputs

+

Between 0 and 1 inputs.

+
    +
  • input (optional, heterogeneous)O: The optional input.

  • +
+

Outputs

+
    +
  • output (heterogeneous)B: A scalar boolean tensor. If true, it indicates that optional-type input contains an element. Otherwise, it is empty.

  • +
+

Type Constraints

+
    +
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input type to optional tensor and optional sequence types.

  • +
  • B tensor(bool): Constrain output to a boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxOptionalHasElement_15#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOptionalHasElement_15(*args, **kwargs)#
+

Version

+

Onnx name: OptionalHasElement

+

This version of the operator has been available since +version 15.

+

Summary

+

Returns true if the optional-type input contains an element. If it is an empty optional-type, this op returns false.

+

Inputs

+
    +
  • input (heterogeneous)O: The optional input.

  • +
+

Outputs

+
    +
  • output (heterogeneous)B: A scalar boolean tensor. If true, it indicates that optional-type input contains an element. Otherwise, it is empty.

  • +
+

Type Constraints

+
    +
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrain input type to optional tensor and optional sequence types.

  • +
  • B tensor(bool): Constrain output to a boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxOptionalHasElement_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOptionalHasElement_18(*args, **kwargs)#
+

Version

+

Onnx name: OptionalHasElement

+

This version of the operator has been available since +version 18.

+

Summary

+

Returns true if (1) the input is an optional-type and contains an element, +or, (2) the input is a tensor or sequence type. +If the input is not provided or is an empty optional-type, this op returns false.

+

Inputs

+

Between 0 and 1 inputs.

+
    +
  • input (optional, heterogeneous)O: The optional input.

  • +
+

Outputs

+
    +
  • output (heterogeneous)B: A scalar boolean tensor. If true, it indicates that optional-type input contains an element. Otherwise, it is empty.

  • +
+

Type Constraints

+
    +
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input type to optional tensor and optional sequence types.

  • +
  • B tensor(bool): Constrain output to a boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxOptional_15#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOptional_15(*args, **kwargs)#
+

Version

+

Onnx name: Optional

+

This version of the operator has been available since +version 15.

+

Summary

+

Constructs an optional-type value containing either an empty optional of a certain type specified by the attribute, +or a non-empty value containing the input element.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 0 and 1 inputs.

+
    +
  • input (optional, heterogeneous)V: The input element.

  • +
+

Outputs

+
    +
  • output (heterogeneous)O: The optional output enclosing the input element.

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input type to all tensor and sequence types.

  • +
  • O optional(seq(tensor(uint8))), optional(seq(tensor(uint16))), optional(seq(tensor(uint32))), optional(seq(tensor(uint64))), optional(seq(tensor(int8))), optional(seq(tensor(int16))), optional(seq(tensor(int32))), optional(seq(tensor(int64))), optional(seq(tensor(float16))), optional(seq(tensor(float))), optional(seq(tensor(double))), optional(seq(tensor(string))), optional(seq(tensor(bool))), optional(seq(tensor(complex64))), optional(seq(tensor(complex128))), optional(tensor(uint8)), optional(tensor(uint16)), optional(tensor(uint32)), optional(tensor(uint64)), optional(tensor(int8)), optional(tensor(int16)), optional(tensor(int32)), optional(tensor(int64)), optional(tensor(float16)), optional(tensor(float)), optional(tensor(double)), optional(tensor(string)), optional(tensor(bool)), optional(tensor(complex64)), optional(tensor(complex128)): Constrain output type to all optional tensor or optional sequence types.

  • +
+
+ +
+
+
+
+

OnnxOr#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOr(*args, **kwargs)#
+

Version

+

Onnx name: Or

+

This version of the operator has been available since +version 7.

+

Summary

+

Returns the tensor resulting from performing the or logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool): Constrain input to boolean tensor.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
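A short sketch of the broadcasting behaviour described above; the input names 'A' and 'B' and the onnxruntime check are illustrative assumptions.

import numpy as np
from onnxruntime import InferenceSession
from skl2onnx.algebra.onnx_ops import OnnxOr

A = np.array([[True, False], [False, False]])
B = np.array([[True, True]])      # broadcast along the first axis

op = OnnxOr('A', 'B', output_names=['C'], op_version=7)
onx = op.to_onnx({'A': A, 'B': B})

sess = InferenceSession(onx.SerializeToString())
print(sess.run(None, {'A': A, 'B': B})[0])
# expected: [[ True  True]
#            [ True  True]]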
+ +
+
+
+
+

OnnxOr_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOr_1(*args, **kwargs)#
+

Version

+

Onnx name: Or

+

This version of the operator has been available since +version 1.

+

Summary

+

Returns the tensor resulting from performing the or logical operation +elementwise on the input tensors A and B.

+

If broadcasting is enabled, the right-hand-side argument will be broadcast +to match the shape of the left-hand-side argument. See the doc of Add for a +detailed description of the broadcasting rules.

+

Attributes

+
    +
  • +
  • broadcast: Enable broadcasting Default value is +name: "broadcast" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: Left input tensor for the logical operator.

  • +
  • B (heterogeneous)T: Right input tensor for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool): Constrain input to boolean tensor.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxOr_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxOr_7(*args, **kwargs)#
+

Version

+

Onnx name: Or

+

This version of the operator has been available since +version 7.

+

Summary

+

Returns the tensor resulting from performing the or logical operation +elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool): Constrain input to boolean tensor.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxPRelu#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPRelu(*args, **kwargs)#
+

Version

+

Onnx name: PRelu

+

This version of the operator has been available since +version 16.

+

Summary

+

PRelu takes input data (Tensor<T>) and slope tensor as input, and produces one +output data (Tensor<T>) where the function f(x) = slope * x for x < 0, +f(x) = x for x >= 0, is applied to the data tensor elementwise. +This operator supports unidirectional broadcasting (tensor slope should be unidirectionally broadcastable to input tensor X); for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
  • slope (heterogeneous)T: Slope tensor. The shape of slope can be smaller than the first input X; if so, its shape must be unidirectionally broadcastable to X

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor (same size as X)

  • +
+

Type Constraints

+
    +
  • T tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64): Constrain input and output types to float/int tensors.

  • +
+
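A minimal sketch of f(x) = slope * x for x < 0 and f(x) = x otherwise, with the slope passed as a broadcastable constant; the input name 'X' and the runtime check are assumptions.

import numpy as np
from onnxruntime import InferenceSession
from skl2onnx.algebra.onnx_ops import OnnxPRelu

X = np.array([[-2.0, -1.0, 0.0, 3.0]], dtype=np.float32)
slope = np.array([0.1], dtype=np.float32)   # unidirectionally broadcastable to X

op = OnnxPRelu('X', slope, output_names=['Y'])
onx = op.to_onnx({'X': X})

sess = InferenceSession(onx.SerializeToString())
print(sess.run(None, {'X': X})[0])
# expected: [[-0.2 -0.1  0.   3. ]]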
+ +
+
+
+
+

OnnxPRelu_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPRelu_1(*args, **kwargs)#
+

Version

+

Onnx name: PRelu

+

This version of the operator has been available since +version 1.

+

Summary

+

PRelu takes input data (Tensor<T>) and slope tensor as input, and produces one +output data (Tensor<T>) where the function f(x) = slope * x for x < 0, +f(x) = x for x >= 0, is applied to the data tensor elementwise.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
  • slope (heterogeneous)T: Slope tensor. If Slope is of size 1, the value is shared across different channels

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxPRelu_16#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPRelu_16(*args, **kwargs)#
+

Version

+

Onnx name: PRelu

+

This version of the operator has been available since +version 16.

+

Summary

+

PRelu takes input data (Tensor<T>) and slope tensor as input, and produces one +output data (Tensor<T>) where the function f(x) = slope * x for x < 0, +f(x) = x for x >= 0, is applied to the data tensor elementwise. +This operator supports unidirectional broadcasting (tensor slope should be unidirectionally broadcastable to input tensor X); for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
  • slope (heterogeneous)T: Slope tensor. The shape of slope can be smaller than the first input X; if so, its shape must be unidirectionally broadcastable to X

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor (same size as X)

  • +
+

Type Constraints

+
    +
  • T tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64): Constrain input and output types to float/int tensors.

  • +
+
+ +
+
+
+
+

OnnxPRelu_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPRelu_6(*args, **kwargs)#
+

Version

+

Onnx name: PRelu

+

This version of the operator has been available since +version 6.

+

Summary

+

PRelu takes input data (Tensor<T>) and slope tensor as input, and produces one +output data (Tensor<T>) where the function f(x) = slope * x for x < 0, +f(x) = x for x >= 0, is applied to the data tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
  • slope (heterogeneous)T: Slope tensor. If Slope is of size 1, the value is shared across different channels

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxPRelu_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPRelu_7(*args, **kwargs)#
+

Version

+

Onnx name: PRelu

+

This version of the operator has been available since +version 7.

+

Summary

+

PRelu takes input data (Tensor<T>) and slope tensor as input, and produces one +output data (Tensor<T>) where the function f(x) = slope * x for x < 0, +f(x) = x for x >= 0, is applied to the data tensor elementwise. +This operator supports unidirectional broadcasting (tensor slope should be unidirectionally broadcastable to input tensor X); for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
  • slope (heterogeneous)T: Slope tensor. The shape of slope can be smaller than the first input X; if so, its shape must be unidirectionally broadcastable to X

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor (same size as X)

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxPRelu_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPRelu_9(*args, **kwargs)#
+

Version

+

Onnx name: PRelu

+

This version of the operator has been available since +version 9.

+

Summary

+

PRelu takes input data (Tensor<T>) and slope tensor as input, and produces one +output data (Tensor<T>) where the function f(x) = slope * x for x < 0, +f(x) = x for x >= 0, is applied to the data tensor elementwise. +This operator supports unidirectional broadcasting (tensor slope should be unidirectionally broadcastable to input tensor X); for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
  • slope (heterogeneous)T: Slope tensor. The shape of slope can be smaller than the first input X; if so, its shape must be unidirectionally broadcastable to X

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor (same size as X)

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(uint32), tensor(uint64), tensor(int32), tensor(int64): Constrain input and output types to float/int tensors.

  • +
+
+ +
+
+
+
+

OnnxPad#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPad(*args, **kwargs)#
+

Version

+

Onnx name: Pad

+

This version of the operator has been available since +version 19.

+

Summary

+

Given a tensor containing the data to be padded (data), a tensor containing the number of start and end pad values for axis (pads), (optionally) a mode, and (optionally) constant_value, +a padded tensor (output) is generated.

+

The supported modes are (similar to corresponding modes supported by numpy.pad):

+
    +
  1. constant (default) - pads with a given constant value as specified by constant_value (which defaults to 0, empty string, or False)

  2. +
  3. reflect - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis

  4. +
  5. edge - pads with the edge values of array

  6. +
  7. wrap - wrap-around padding as if the data tensor forms a torus

  8. +
+

Example 1 (constant mode):

+

Insert 0 pads to the beginning of the second dimension.

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [0, 2, 0, 0]
+
+mode = 'constant'
+
+constant_value = 0.0
+
+output = [
+    [0.0, 0.0, 1.0, 1.2],
+    [0.0, 0.0, 2.3, 3.4],
+    [0.0, 0.0, 4.5, 5.7],
+]
+
+
+

Example 2 (reflect mode):

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [0, 2, 0, 0]
+
+mode = 'reflect'
+
+output = [
+    [1.0, 1.2, 1.0, 1.2],
+    [2.3, 3.4, 2.3, 3.4],
+    [4.5, 5.7, 4.5, 5.7],
+]
+
+
+

Example 3 (edge mode):

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [0, 2, 0, 0]
+
+mode = 'edge'
+
+output = [
+    [1.0, 1.0, 1.0, 1.2],
+    [2.3, 2.3, 2.3, 3.4],
+    [4.5, 4.5, 4.5, 5.7],
+]
+
+
+

Example 4 (wrap mode):

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [2, 1, 1, 1]
+
+mode = 'wrap'
+
+output = [
+    [3.4, 2.3, 3.4, 2.3],
+    [5.7, 4.5, 5.7, 4.5],
+    [1.2, 1.0, 1.2, 1.0],
+    [3.4, 2.3, 3.4, 2.3],
+    [5.7, 4.5, 5.7, 4.5],
+    [1.2, 1.0, 1.2, 1.0],
+]
+
+
+

Attributes

+
    +
  • mode: Supported modes: constant (default), reflect, edge, wrap. Default value is +name: "mode" s: "constant" type: STRING

  • +
+

Inputs

+

Between 2 and 4 inputs.

+
    +
  • data (heterogeneous)T: Input tensor.

  • +
  • pads (heterogeneous)tensor(int64): Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. pads should be a 1D tensor of shape [2 * num_axes] where num_axes refers to the number of elements in the axes input or the input rank if axes are not provided explicitly. pads format should be: [x1_begin, x2_begin, …, x1_end, x2_end,…], where xi_begin is the number of pad values added at the beginning of axis axes[i] and xi_end, the number of pad values added at the end of axis axes[i].

  • +
  • constant_value (optional, heterogeneous)T: (Optional) A scalar value to be used if the mode chosen is constant (by default it is 0, empty string or False).

  • +
  • axes (optional, heterogeneous)Tind: 1-D tensor of axes that pads apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Behavior is undefined if an axis is repeated. If not provided, all axes are assumed ([0, 1, …, input_rank-1]).

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor after padding.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
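A sketch reproducing Example 1 (constant mode) from the summary above with the algebra class; the input name 'X', the opset choice, and the runtime check are assumptions.

import numpy as np
from onnxruntime import InferenceSession
from skl2onnx.algebra.onnx_ops import OnnxPad

data = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]], dtype=np.float32)
pads = np.array([0, 2, 0, 0], dtype=np.int64)       # two leading pads on axis 1
constant_value = np.array(0.0, dtype=np.float32)    # optional, 0 by default

op = OnnxPad('X', pads, constant_value, mode='constant',
             output_names=['Y'], op_version=19)
onx = op.to_onnx({'X': data})

sess = InferenceSession(onx.SerializeToString())
print(sess.run(None, {'X': data})[0])
# expected: [[0.  0.  1.  1.2]
#            [0.  0.  2.3 3.4]
#            [0.  0.  4.5 5.7]]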
+ +
+
+
+
+

OnnxPad_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPad_1(*args, **kwargs)#
+

Version

+

Onnx name: Pad

+

This version of the operator has been available since +version 1.

+

Summary

+

Given data tensor, paddings, mode, and value.

Example:

Insert 0 paddings to the beginning of the second dimension.

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]

paddings = [0, 0, 2, 0]

output = [
    [
        [0.0, 0.0, 1.0, 1.2],
        [0.0, 0.0, 2.3, 3.4],
        [0.0, 0.0, 4.5, 5.7],
    ],
]

Attributes

+
    +
  • mode: Three modes: constant (default), reflect, edge. Default value is +name: "mode" s: "constant" type: STRING

  • +
  • +
  • value: One float, indicates the value to be filled, default is 0 Default value is +name: "value" f: 0.0 type: FLOAT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Input tensor.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor after padding.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxPad_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPad_11(*args, **kwargs)#
+

Version

+

Onnx name: Pad

+

This version of the operator has been available since +version 11.

+

Summary

+

Given a tensor containing the data to be padded (data), a tensor containing the number of start and end pad values for axis (pads), (optionally) a mode, and (optionally) constant_value, +a padded tensor (output) is generated.

+

The three supported modes are (similar to corresponding modes supported by numpy.pad):

+
    +
  1. constant (default) - pads with a given constant value as specified by constant_value (which defaults to 0)

  2. +
  3. reflect - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis

  4. +
  5. edge - pads with the edge values of array

  6. +
+
+
Example 1 (constant mode):

Insert 0 pads to the beginning of the second dimension.

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]

pads = [0, 2, 0, 0]

mode = 'constant'

constant_value = 0.0

output = [
    [0.0, 0.0, 1.0, 1.2],
    [0.0, 0.0, 2.3, 3.4],
    [0.0, 0.0, 4.5, 5.7],
]

Example 2 (reflect mode):

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]

pads = [0, 2, 0, 0]

mode = 'reflect'

output = [
    [1.0, 1.2, 1.0, 1.2],
    [2.3, 3.4, 2.3, 3.4],
    [4.5, 5.7, 4.5, 5.7],
]

Example 3 (edge mode):

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]

pads = [0, 2, 0, 0]

mode = 'edge'

output = [
    [1.0, 1.0, 1.0, 1.2],
    [2.3, 2.3, 2.3, 3.4],
    [4.5, 4.5, 4.5, 5.7],
]

+
+
+

Attributes

+
    +
  • mode: Supported modes: constant (default), reflect, edge. Default value is +name: "mode" s: "constant" type: STRING

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • data (heterogeneous)T: Input tensor.

  • +
  • pads (heterogeneous)tensor(int64): Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. pads should be a 1D tensor of shape [2 * input_rank]. pads format should be: [x1_begin, x2_begin,…,x1_end, x2_end,…], where xi_begin is the number of pad values added at the beginning of axis i and xi_end, the number of pad values added at the end of axis i.

  • +
  • constant_value (optional, heterogeneous)T: (Optional) A scalar value to be used if the mode chosen is constant (by default it is 0).

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor after padding.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output to only numeric types.

  • +
+
+ +
+
+
+
+

OnnxPad_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPad_13(*args, **kwargs)#
+

Version

+

Onnx name: Pad

+

This version of the operator has been available since +version 13.

+

Summary

+

Given a tensor containing the data to be padded (data), a tensor containing the number of start and end pad values for axis (pads), (optionally) a mode, and (optionally) constant_value, +a padded tensor (output) is generated.

+

The three supported modes are (similar to corresponding modes supported by numpy.pad):

+
    +
  1. constant (default) - pads with a given constant value as specified by constant_value (which defaults to 0, empty string, or False)

  2. +
  3. reflect - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis

  4. +
  5. edge - pads with the edge values of array

  6. +
+
+
Example 1 (constant mode):

Insert 0 pads to the beginning of the second dimension.

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]

pads = [0, 2, 0, 0]

mode = 'constant'

constant_value = 0.0

output = [
    [0.0, 0.0, 1.0, 1.2],
    [0.0, 0.0, 2.3, 3.4],
    [0.0, 0.0, 4.5, 5.7],
]

Example 2 (reflect mode):

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]

pads = [0, 2, 0, 0]

mode = 'reflect'

output = [
    [1.0, 1.2, 1.0, 1.2],
    [2.3, 3.4, 2.3, 3.4],
    [4.5, 5.7, 4.5, 5.7],
]

Example 3 (edge mode):

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]

pads = [0, 2, 0, 0]

mode = 'edge'

output = [
    [1.0, 1.0, 1.0, 1.2],
    [2.3, 2.3, 2.3, 3.4],
    [4.5, 4.5, 4.5, 5.7],
]

+
+
+

Attributes

+
    +
  • mode: Supported modes: constant (default), reflect, edge. Default value is +name: "mode" s: "constant" type: STRING

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • data (heterogeneous)T: Input tensor.

  • +
  • pads (heterogeneous)tensor(int64): Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. pads should be a 1D tensor of shape [2 * input_rank]. pads format should be: [x1_begin, x2_begin,…,x1_end, x2_end,…], where xi_begin is the number of pad values added at the beginning of axis i and xi_end, the number of pad values added at the end of axis i.

  • +
  • constant_value (optional, heterogeneous)T: (Optional) A scalar value to be used if the mode chosen is constant (by default it is 0, empty string or False).

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor after padding.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxPad_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPad_18(*args, **kwargs)#
+

Version

+

Onnx name: Pad

+

This version of the operator has been available since +version 18.

+

Summary

+

Given a tensor containing the data to be padded (data), a tensor containing the number of start and end pad values for axis (pads), (optionally) a mode, and (optionally) constant_value, +a padded tensor (output) is generated.

+

The three supported modes are (similar to corresponding modes supported by numpy.pad):

+
    +
  1. constant (default) - pads with a given constant value as specified by constant_value (which defaults to 0, empty string, or False)

  2. +
  3. reflect - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis

  4. +
  5. edge - pads with the edge values of array

  6. +
+

Example 1 (constant mode):

+

Insert 0 pads to the beginning of the second dimension.

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [0, 2, 0, 0]
+
+mode = 'constant'
+
+constant_value = 0.0
+
+output = [
+    [0.0, 0.0, 1.0, 1.2],
+    [0.0, 0.0, 2.3, 3.4],
+    [0.0, 0.0, 4.5, 5.7],
+]
+
+
+

Example 2 (reflect mode):

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [0, 2, 0, 0]
+
+mode = 'reflect'
+
+output = [
+    [1.0, 1.2, 1.0, 1.2],
+    [2.3, 3.4, 2.3, 3.4],
+    [4.5, 5.7, 4.5, 5.7],
+]
+
+
+

Example 3 (edge mode):

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [0, 2, 0, 0]
+
+mode = 'edge'
+
+output = [
+    [1.0, 1.0, 1.0, 1.2],
+    [2.3, 2.3, 2.3, 3.4],
+    [4.5, 4.5, 4.5, 5.7],
+]
+
+
+

Attributes

+
    +
  • mode: Supported modes: constant (default), reflect, edge. Default value is +name: "mode" s: "constant" type: STRING

  • +
+

Inputs

+

Between 2 and 4 inputs.

+
    +
  • data (heterogeneous)T: Input tensor.

  • +
  • pads (heterogeneous)tensor(int64): Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. pads should be a 1D tensor of shape [2 * num_axes] where num_axes refers to the number of elements in the axes input or the input rank if axes are not provided explicitly. pads format should be: [x1_begin, x2_begin, …, x1_end, x2_end,…], where xi_begin is the number of pad values added at the beginning of axis axes[i] and xi_end, the number of pad values added at the end of axis axes[i].

  • +
  • constant_value (optional, heterogeneous)T: (Optional) A scalar value to be used if the mode chosen is constant (by default it is 0, empty string or False).

  • +
  • axes (optional, heterogeneous)Tind: 1-D tensor of axes that pads apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Behavior is undefined if an axis is repeated. If not provided, all axes are assumed ([0, 1, …, input_rank-1]).

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor after padding.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxPad_19#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPad_19(*args, **kwargs)#
+

Version

+

Onnx name: Pad

+

This version of the operator has been available since +version 19.

+

Summary

+

Given a tensor containing the data to be padded (data), a tensor containing the number of start and end pad values for axis (pads), (optionally) a mode, and (optionally) constant_value, +a padded tensor (output) is generated.

+

The supported modes are (similar to corresponding modes supported by numpy.pad):

+
    +
  1. constant (default) - pads with a given constant value as specified by constant_value (which defaults to 0, empty string, or False)

  2. +
  3. reflect - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis

  4. +
  5. edge - pads with the edge values of array

  6. +
  7. wrap - wrap-around padding as if the data tensor forms a torus

  8. +
+

Example 1 (constant mode):

+

Insert 0 pads to the beginning of the second dimension.

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [0, 2, 0, 0]
+
+mode = 'constant'
+
+constant_value = 0.0
+
+output = [
+    [0.0, 0.0, 1.0, 1.2],
+    [0.0, 0.0, 2.3, 3.4],
+    [0.0, 0.0, 4.5, 5.7],
+]
+
+
+

Example 2 (reflect mode):

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [0, 2, 0, 0]
+
+mode = 'reflect'
+
+output = [
+    [1.0, 1.2, 1.0, 1.2],
+    [2.3, 3.4, 2.3, 3.4],
+    [4.5, 5.7, 4.5, 5.7],
+]
+
+
+

Example 3 (edge mode):

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [0, 2, 0, 0]
+
+mode = 'edge'
+
+output = [
+    [1.0, 1.0, 1.0, 1.2],
+    [2.3, 2.3, 2.3, 3.4],
+    [4.5, 4.5, 4.5, 5.7],
+]
+
+
+

Example 4 (wrap mode):

+
data = [
+    [1.0, 1.2],
+    [2.3, 3.4],
+    [4.5, 5.7],
+]
+
+pads = [2, 1, 1, 1]
+
+mode = 'wrap'
+
+output = [
+    [3.4, 2.3, 3.4, 2.3],
+    [5.7, 4.5, 5.7, 4.5],
+    [1.2, 1.0, 1.2, 1.0],
+    [3.4, 2.3, 3.4, 2.3],
+    [5.7, 4.5, 5.7, 4.5],
+    [1.2, 1.0, 1.2, 1.0],
+]
+
+
+

Attributes

+
    +
  • mode: Supported modes: constant (default), reflect, edge, wrap. Default value is +name: "mode" s: "constant" type: STRING

  • +
+

Inputs

+

Between 2 and 4 inputs.

+
    +
  • data (heterogeneous)T: Input tensor.

  • +
  • pads (heterogeneous)tensor(int64): Tensor of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. pads should be a 1D tensor of shape [2 * num_axes] where num_axes refers to the number of elements in the axes input or the input rank if axes are not provided explicitly. pads format should be: [x1_begin, x2_begin, …, x1_end, x2_end,…], where xi_begin is the number of pad values added at the beginning of axis axes[i] and xi_end, the number of pad values added at the end of axis axes[i].

  • +
  • constant_value (optional, heterogeneous)T: (Optional) A scalar value to be used if the mode chosen is constant (by default it is 0, empty string or False).

  • +
  • axes (optional, heterogeneous)Tind: 1-D tensor of axes that pads apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Behavior is undefined if an axis is repeated. If not provided, all axes are assumed ([0, 1, …, input_rank-1]).

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor after padding.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxPad_2#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPad_2(*args, **kwargs)#
+

Version

+

Onnx name: Pad

+

This version of the operator has been available since +version 2.

+

Summary

+

Given data tensor, pads, mode, and value.

Example:

Insert 0 pads to the beginning of the second dimension.

data = [
    [1.0, 1.2],
    [2.3, 3.4],
    [4.5, 5.7],
]

pads = [0, 2, 0, 0]

output = [
    [
        [0.0, 0.0, 1.0, 1.2],
        [0.0, 0.0, 2.3, 3.4],
        [0.0, 0.0, 4.5, 5.7],
    ],
]

+
+

Attributes

+
    +
  • mode: Three modes: constant (default), reflect, edge. Default value is +name: "mode" s: "constant" type: STRING

  • +
  • +
  • value: One float, indicates the value to be filled. Default value is +name: "value" f: 0.0 type: FLOAT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Input tensor.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor after padding.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxPow#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPow(*args, **kwargs)#
+

Version

+

Onnx name: Pow

+

This version of the operator has been available since +version 15.

+

Summary

+

Pow takes input data (Tensor<T>) and exponent Tensor, and +produces one output data (Tensor<T>) where the function f(x) = x^exponent, +is applied to the data tensor elementwise. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • X (heterogeneous)T: First operand, base of the exponent.

  • +
  • Y (heterogeneous)T1: Second operand, power of the exponent.

  • +
+

Outputs

+
    +
  • Z (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input X and output types to float/int tensors.

  • +
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input Y types to float/int tensors.

  • +
+
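A brief sketch of the elementwise power with a broadcast exponent, passed here as a constant; the input name 'X' and the runtime check are assumptions.

import numpy as np
from onnxruntime import InferenceSession
from skl2onnx.algebra.onnx_ops import OnnxPow

X = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)
exponent = np.array([2.0], dtype=np.float32)    # broadcast over X

op = OnnxPow('X', exponent, output_names=['Z'], op_version=15)
onx = op.to_onnx({'X': X})

sess = InferenceSession(onx.SerializeToString())
print(sess.run(None, {'X': X})[0])
# expected: [[1. 4. 9.]]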
+ +
+
+
+
+

OnnxPow_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxPow_1(*args, **kwargs)#
+

Version

+

Onnx name: Pow

+

This version of the operator has been available since +version 1.

+

Summary

+

Pow takes input data (Tensor<T>) and exponent Tensor, and +produces one output data (Tensor<T>) where the function f(x) = x^exponent, +is applied to the data tensor elementwise.

+

If necessary the right-hand-side argument will be broadcast to match the +shape of the left-hand-side argument. When broadcasting is specified, the second +tensor can either be of element size 1 (including a scalar tensor and any +tensor with rank equal to or smaller than the first tensor), or have its +shape as a contiguous subset of the first tensor’s shape. The start of the +mutually equal shape is specified by the argument “axis”, and if it is not set, +suffix matching is assumed. 1-dim expansion doesn’t work yet.

+

For example, the following tensor shapes are supported (with broadcast=1):

+
+

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor +shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor +shape(A) = (2, 3, 4, 5), shape(B) = (5,) +shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) +shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 +shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

+
+

Attribute broadcast=1 needs to be passed to enable broadcasting.

+

Attributes

+
    +
  • +
  • broadcast: Pass 1 to enable broadcasting Default value is +name: "broadcast" i: 0 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor of any shape, base of the exponent.

  • +
  • Y (heterogeneous)T: Input tensor of any shape broadcastable to X shape, the exponent component.

  • +
+

Outputs

+
    +
  • Z (heterogeneous)T: Output tensor (same size as X)

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+
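To make the legacy broadcast=1 rule above concrete, here is a numpy sketch of how a right-hand operand of shape (3, 4) lines up against A of shape (2, 3, 4, 5) when axis=1; it only illustrates the alignment, it is not how a runtime implements Pow-1.

    import numpy as np

    A = np.full((2, 3, 4, 5), 2.0, dtype=np.float32)
    B = np.full((3, 4), 3.0, dtype=np.float32)
    axis = 1
    # Align B with dimensions [axis, axis + B.ndim) of A, then expand on the right.
    B_aligned = B.reshape((1,) * axis + B.shape + (1,) * (A.ndim - axis - B.ndim))
    Z = np.power(A, B_aligned)
    print(Z.shape, Z.flat[0])   # (2, 3, 4, 5) 8.0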

OnnxPow_12#

class skl2onnx.algebra.onnx_ops.OnnxPow_12(*args, **kwargs)#

Version

Onnx name: Pow

This version of the operator has been available since version 12.

Summary

Pow takes input data (Tensor<T>) and exponent Tensor, and produces one output data (Tensor<T>) where the function f(x) = x^exponent, is applied to the data tensor elementwise. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • X (heterogeneous)T: First operand, base of the exponent.

  • Y (heterogeneous)T1: Second operand, power of the exponent.

Outputs

  • Z (heterogeneous)T: Output tensor.

Type Constraints

  • T tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input X and output types to float/int tensors.

  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input Y types to float/int tensors.

OnnxPow_13#

class skl2onnx.algebra.onnx_ops.OnnxPow_13(*args, **kwargs)#

Version

Onnx name: Pow

This version of the operator has been available since version 13.

Summary

Pow takes input data (Tensor<T>) and exponent Tensor, and produces one output data (Tensor<T>) where the function f(x) = x^exponent, is applied to the data tensor elementwise. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • X (heterogeneous)T: First operand, base of the exponent.

  • Y (heterogeneous)T1: Second operand, power of the exponent.

Outputs

  • Z (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input X and output types to float/int tensors.

  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input Y types to float/int tensors.

OnnxPow_15#

class skl2onnx.algebra.onnx_ops.OnnxPow_15(*args, **kwargs)#

Version

Onnx name: Pow

This version of the operator has been available since version 15.

Summary

Pow takes input data (Tensor<T>) and exponent Tensor, and produces one output data (Tensor<T>) where the function f(x) = x^exponent, is applied to the data tensor elementwise. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • X (heterogeneous)T: First operand, base of the exponent.

  • Y (heterogeneous)T1: Second operand, power of the exponent.

Outputs

  • Z (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input X and output types to float/int tensors.

  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input Y types to float/int tensors.

OnnxPow_7#

class skl2onnx.algebra.onnx_ops.OnnxPow_7(*args, **kwargs)#

Version

Onnx name: Pow

This version of the operator has been available since version 7.

Summary

Pow takes input data (Tensor<T>) and exponent Tensor, and produces one output data (Tensor<T>) where the function f(x) = x^exponent, is applied to the data tensor elementwise. This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

Inputs

  • X (heterogeneous)T: First operand, base of the exponent.

  • Y (heterogeneous)T: Second operand, power of the exponent.

Outputs

  • Z (heterogeneous)T: Output tensor.

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxQLinearConv#

class skl2onnx.algebra.onnx_ops.OnnxQLinearConv(*args, **kwargs)#

Version

Onnx name: QLinearConv

This version of the operator has been available since version 10.

Summary

The convolution operator consumes a quantized input tensor, its scale and zero point, a quantized filter, its scale and zero point, and output’s scale and zero point, and computes the quantized output. Each scale and zero-point pair must have same shape. It means they must be either scalars (per tensor) or 1-D tensors (per output channel). Each input or output and its related zero point must have same type. When bias is present it must be quantized using scale = input scale * weight scale and zero point as 0.

Attributes

  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is name: "auto_pad" s: "NOTSET" type: STRING

  • group: number of groups input channels and output channels are divided into. default is 1. Default value is name: "group" i: 1 type: INT

Inputs

Between 8 and 9 inputs.

  • x (heterogeneous)T1: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • x_scale (heterogeneous)tensor(float): Scale tensor for input ‘x’. It’s a scalar, which means a per-tensor/layer quantization.

  • x_zero_point (heterogeneous)T1: Zero point tensor for input ‘x’. It’s a scalar, which means a per-tensor/layer quantization.

  • w (heterogeneous)T2: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL.

  • w_scale (heterogeneous)tensor(float): Scale tensor for input ‘w’. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it’s a 1-D tensor, its number of elements should be equal to the number of output channels (M).

  • w_zero_point (heterogeneous)T2: Zero point tensor for input ‘w’. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it’s a 1-D tensor, its number of elements should be equal to the number of output channels (M).

  • y_scale (heterogeneous)tensor(float): Scale tensor for output ‘y’. It’s a scalar, which means a per-tensor/layer quantization.

  • y_zero_point (heterogeneous)T3: Zero point tensor for output ‘y’. It’s a scalar, which means a per-tensor/layer quantization.

  • B (optional, heterogeneous)T4: Optional 1D bias to be added to the convolution, has size of M. Bias must be quantized using scale = x_scale * w_scale and zero_point = 0

Outputs

  • y (heterogeneous)T3: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

Type Constraints

  • T1 tensor(int8), tensor(uint8): Constrain input type to 8-bit integer tensor.

  • T2 tensor(int8), tensor(uint8): Constrain filter type to 8-bit integer tensor.

  • T3 tensor(int8), tensor(uint8): Constrain output type to 8-bit integer tensor.

  • T4 tensor(int32): Constrain bias type to 32-bit integer tensor.

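The bias constraint in the summary (scale = x_scale * w_scale, zero point 0) can be sketched in numpy; the names below (b_float, b_quant) are made up for the illustration.

    import numpy as np

    x_scale = np.float32(0.02)
    w_scale = np.float32(0.005)                            # per-tensor weight scale
    b_float = np.array([0.37, -0.12], dtype=np.float32)    # one value per output channel
    # Quantized bias fed to QLinearConv: int32, scale = x_scale * w_scale, zero point 0.
    b_quant = np.round(b_float / (x_scale * w_scale)).astype(np.int32)
    print(b_quant)   # [ 3700 -1200]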

OnnxQLinearConv_10#

class skl2onnx.algebra.onnx_ops.OnnxQLinearConv_10(*args, **kwargs)#

Version

Onnx name: QLinearConv

This version of the operator has been available since version 10.

Summary

The convolution operator consumes a quantized input tensor, its scale and zero point, a quantized filter, its scale and zero point, and output’s scale and zero point, and computes the quantized output. Each scale and zero-point pair must have same shape. It means they must be either scalars (per tensor) or 1-D tensors (per output channel). Each input or output and its related zero point must have same type. When bias is present it must be quantized using scale = input scale * weight scale and zero point as 0.

Attributes

  • auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. Default value is name: "auto_pad" s: "NOTSET" type: STRING

  • group: number of groups input channels and output channels are divided into. default is 1. Default value is name: "group" i: 1 type: INT

Inputs

Between 8 and 9 inputs.

  • x (heterogeneous)T1: Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 … x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE …].

  • +
  • x_scale (heterogeneous)tensor(float): Scale tensor for input ‘x’. It’s a scalar, which means a per-tensor/layer quantization.

  • +
  • x_zero_point (heterogeneous)T1: Zero point tensor for input ‘x’. It’s a scalar, which means a per-tensor/layer quantization.

  • +
  • w (heterogeneous)T2: The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x … x kn), where (k1 x k2 x … kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL …]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL.

  • +
  • w_scale (heterogeneous)tensor(float): Scale tensor for input ‘w’. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it’s a 1-D tensor, its number of elements should be equal to the number of output channels (M).

  • +
  • w_zero_point (heterogeneous)T2: Zero point tensor for input ‘w’. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it’s a 1-D tensor, its number of elements should be equal to the number of output channels (M).

  • +
  • y_scale (heterogeneous)tensor(float): Scale tensor for output ‘y’. It’s a scalar, which means a per-tensor/layer quantization.

  • +
  • y_zero_point (heterogeneous)T3: Zero point tensor for output ‘y’. It’s a scalar, which means a per-tensor/layer quantization.

  • +
  • B (optional, heterogeneous)T4: Optional 1D bias to be added to the convolution, has size of M. Bias must be quantized using scale = x_scale * w_scale and zero_point = 0

  • +
+

Outputs

+
    +
  • y (heterogeneous)T3: Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.

  • +
+

Type Constraints

+
    +
  • T1 tensor(int8), tensor(uint8): Constrain input type to 8-bit integer tensor.

  • +
  • T2 tensor(int8), tensor(uint8): Constrain filter type to 8-bit integer tensor.

  • +
  • T3 tensor(int8), tensor(uint8): Constrain output type to 8-bit integer tensor.

  • +
  • T4 tensor(int32): Constrain bias type to 32-bit integer tensor.


OnnxQLinearMatMul#

class skl2onnx.algebra.onnx_ops.OnnxQLinearMatMul(*args, **kwargs)#

Version

Onnx name: QLinearMatMul

This version of the operator has been available since version 10.

Summary

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point). For (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. Scale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor (per row for ‘a’ and per column for ‘b’). Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization. If the input is 2D of shape [M, K] then zero point and scale tensor may be an M element vector [v_1, v_2, …, v_M] for per row quantization and K element vector of shape [v_1, v_2, …, v_K] for per column quantization. If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may have shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization. Production must never overflow, and accumulation may overflow if and only if in 32 bits.

Inputs

  • a (heterogeneous)T1: N-dimensional quantized matrix a

  • a_scale (heterogeneous)tensor(float): scale of quantized input a

  • a_zero_point (heterogeneous)T1: zero point of quantized input a

  • b (heterogeneous)T2: N-dimensional quantized matrix b

  • b_scale (heterogeneous)tensor(float): scale of quantized input b

  • b_zero_point (heterogeneous)T2: zero point of quantized input b

  • y_scale (heterogeneous)tensor(float): scale of quantized output y

  • y_zero_point (heterogeneous)T3: zero point of quantized output y

Outputs

  • y (heterogeneous)T3: Quantized matrix multiply results from a * b

Type Constraints

  • T1 tensor(int8), tensor(uint8): Constrain input a and its zero point data type to 8-bit integer tensor.

  • T2 tensor(int8), tensor(uint8): Constrain input b and its zero point data type to 8-bit integer tensor.

  • T3 tensor(int8), tensor(uint8): Constrain output y and its zero point data type to 8-bit integer tensor.
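A numpy reference of the computation described above (dequantize, matrix multiply in float, requantize with rounding to nearest ties to even and saturation) can help when checking results; it mirrors the formula in the summary but is not the kernel an actual runtime uses.

    import numpy as np

    def qlinear_matmul_ref(a, a_scale, a_zp, b, b_scale, b_zp,
                           y_scale, y_zp, dtype=np.uint8):
        # Dequantize both operands, multiply in float32, then requantize.
        a_f = (a.astype(np.float32) - a_zp) * a_scale
        b_f = (b.astype(np.float32) - b_zp) * b_scale
        y_f = a_f @ b_f
        info = np.iinfo(dtype)
        q = np.rint(y_f / y_scale) + y_zp          # np.rint rounds half to even
        return np.clip(q, info.min, info.max).astype(dtype)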

OnnxQLinearMatMul_10#

class skl2onnx.algebra.onnx_ops.OnnxQLinearMatMul_10(*args, **kwargs)#

Version

Onnx name: QLinearMatMul

This version of the operator has been available since version 10.

Summary

Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point). For (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. Scale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor (per row for ‘a’ and per column for ‘b’). Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization. If the input is 2D of shape [M, K] then zero point and scale tensor may be an M element vector [v_1, v_2, …, v_M] for per row quantization and K element vector of shape [v_1, v_2, …, v_K] for per column quantization. If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may have shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization. Production must never overflow, and accumulation may overflow if and only if in 32 bits.

Inputs

  • a (heterogeneous)T1: N-dimensional quantized matrix a

  • a_scale (heterogeneous)tensor(float): scale of quantized input a

  • a_zero_point (heterogeneous)T1: zero point of quantized input a

  • b (heterogeneous)T2: N-dimensional quantized matrix b

  • b_scale (heterogeneous)tensor(float): scale of quantized input b

  • b_zero_point (heterogeneous)T2: zero point of quantized input b

  • y_scale (heterogeneous)tensor(float): scale of quantized output y

  • y_zero_point (heterogeneous)T3: zero point of quantized output y

Outputs

  • y (heterogeneous)T3: Quantized matrix multiply results from a * b

Type Constraints

  • T1 tensor(int8), tensor(uint8): Constrain input a and its zero point data type to 8-bit integer tensor.

  • T2 tensor(int8), tensor(uint8): Constrain input b and its zero point data type to 8-bit integer tensor.

  • T3 tensor(int8), tensor(uint8): Constrain output y and its zero point data type to 8-bit integer tensor.

OnnxQuantizeLinear#

class skl2onnx.algebra.onnx_ops.OnnxQuantizeLinear(*args, **kwargs)#

Version

Onnx name: QuantizeLinear

This version of the operator has been available since version 13.

Summary

The linear quantization operator. It consumes a high precision tensor, a scale, and a zero point to compute the low precision / quantized tensor. The scale factor and zero point must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. The quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it’s uint8, or [-128, 127] if it’s int8. For (x / y_scale), it’s rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. ‘y_zero_point’ and ‘y’ must have same type.

Attributes

  • axis: (Optional) The axis of the quantization dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: 1 type: INT

Inputs

Between 2 and 3 inputs.

  • x (heterogeneous)T1: N-D full precision Input tensor to be quantized.

  • y_scale (heterogeneous)tensor(float): Scale for doing quantization to get ‘y’. It can be a scalar, which means per-tensor/layer quantization, or a 1-D Tensor for per-axis quantization.

  • y_zero_point (optional, heterogeneous)T2: Zero point for doing quantization to get ‘y’. Shape must match y_scale. Default is uint8 with zero point of 0 if it’s not specified.

Outputs

  • y (heterogeneous)T2: N-D quantized output tensor. It has same shape as input ‘x’.

Type Constraints

  • T1 tensor(float), tensor(int32): Constrain ‘x’ to float or int32 tensor.

  • T2 tensor(int8), tensor(uint8): Constrain ‘y_zero_point’ and ‘y’ to 8-bit integer tensor.
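The per-tensor form of the formula above, y = saturate(round(x / y_scale) + y_zero_point), can be written as a small numpy sketch; it follows the summary, not any specific runtime kernel.

    import numpy as np

    def quantize_linear_ref(x, y_scale, y_zero_point=np.uint8(0)):
        dtype = y_zero_point.dtype                       # uint8 or int8
        info = np.iinfo(dtype)
        q = np.rint(x / y_scale) + int(y_zero_point)     # round half to even
        return np.clip(q, info.min, info.max).astype(dtype)

    x = np.array([-1.0, 0.0, 0.5, 3.0], dtype=np.float32)
    print(quantize_linear_ref(x, np.float32(0.01)))      # [  0   0  50 255]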

OnnxQuantizeLinear_10#

class skl2onnx.algebra.onnx_ops.OnnxQuantizeLinear_10(*args, **kwargs)#

Version

Onnx name: QuantizeLinear

This version of the operator has been available since version 10.

Summary

The linear per-tensor/layer quantization operator. It consumes a high precision tensor, a scale, a zero point to compute the low precision / quantized tensor. The quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it’s uint8, or [-128, 127] if it’s int8. For (x / y_scale), it’s rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. ‘y_zero_point’ and ‘y’ must have same type.

Inputs

Between 2 and 3 inputs.

  • x (heterogeneous)T1: N-D full precision Input tensor to be quantized.

  • y_scale (heterogeneous)tensor(float): Scale for doing quantization to get ‘y’. It’s a scalar, which means a per-tensor/layer quantization.

  • y_zero_point (optional, heterogeneous)T2: Zero point for doing quantization to get ‘y’. It’s a scalar, which means a per-tensor/layer quantization. Default value is uint8 typed 0 if it’s not specified.

Outputs

  • y (heterogeneous)T2: N-D quantized output tensor. It has same shape as input ‘x’.

Type Constraints

  • T1 tensor(float), tensor(int32): Constrain ‘x’ to float or int32 tensor.

  • T2 tensor(int8), tensor(uint8): Constrain ‘y_zero_point’ and ‘y’ to 8-bit integer tensor.

OnnxQuantizeLinear_13#

class skl2onnx.algebra.onnx_ops.OnnxQuantizeLinear_13(*args, **kwargs)#

Version

Onnx name: QuantizeLinear

This version of the operator has been available since version 13.

Summary

The linear quantization operator. It consumes a high precision tensor, a scale, and a zero point to compute the low precision / quantized tensor. The scale factor and zero point must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. The quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it’s uint8, or [-128, 127] if it’s int8. For (x / y_scale), it’s rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. ‘y_zero_point’ and ‘y’ must have same type.

Attributes

  • axis: (Optional) The axis of the quantization dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is name: "axis" i: 1 type: INT

Inputs

Between 2 and 3 inputs.

  • x (heterogeneous)T1: N-D full precision Input tensor to be quantized.

  • y_scale (heterogeneous)tensor(float): Scale for doing quantization to get ‘y’. It can be a scalar, which means per-tensor/layer quantization, or a 1-D Tensor for per-axis quantization.

  • y_zero_point (optional, heterogeneous)T2: Zero point for doing quantization to get ‘y’. Shape must match y_scale. Default is uint8 with zero point of 0 if it’s not specified.

Outputs

  • y (heterogeneous)T2: N-D quantized output tensor. It has same shape as input ‘x’.

Type Constraints

  • T1 tensor(float), tensor(int32): Constrain ‘x’ to float or int32 tensor.

  • T2 tensor(int8), tensor(uint8): Constrain ‘y_zero_point’ and ‘y’ to 8-bit integer tensor.

OnnxRNN#

class skl2onnx.algebra.onnx_ops.OnnxRNN(*args, **kwargs)#

Version

Onnx name: RNN

This version of the operator has been available since version 14.

Summary

Computes a one-layer simple RNN. This operator is usually supported via some custom implementation such as CuDNN.

Notations:

  • X - input tensor

  • i - input gate

  • t - time step (t-1 means previous time step)

  • Wi - W parameter weight matrix for input gate

  • Ri - R recurrence weight matrix for input gate

  • Wbi - W parameter bias vector for input gate

  • Rbi - R parameter bias vector for input gate

  • WBi - W parameter weight matrix for backward input gate

  • RBi - R recurrence weight matrix for backward input gate

  • WBbi - WR bias vectors for backward input gate

  • RBbi - RR bias vectors for backward input gate

  • H - Hidden state

  • num_directions - 2 if direction == bidirectional else 1

Activation functions:

  • Relu(x) - max(0, x)

  • Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

  • Sigmoid(x) - 1/(1 + e^{-x})

NOTE: Below are optional

  • Affine(x) - alpha*x + beta

  • LeakyRelu(x) - x if x >= 0 else alpha * x

  • ThresholdedRelu(x) - x if x >= alpha else 0

  • ScaledTanh(x) - alpha*Tanh(beta*x)

  • HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

  • Elu(x) - x if x >= 0 else alpha*(e^x - 1)

  • Softsign(x) - x/(1 + |x|)

  • Softplus(x) - log(1 + e^x)

Equations (Default: f=Tanh):

  • Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

Attributes

  • activations: One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default Tanh if not specified. Default value is name: "activations" strings: "Tanh" strings: "Tanh" type: STRINGS

  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is name: "direction" s: "forward" type: STRING

  • layout: The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size]. Default value is name: "layout" i: 0 type: INT

Inputs

Between 3 and 6 inputs.

  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • W (heterogeneous)T: The weight tensor for input gate. Concatenation of Wi and WBi (if bidirectional). The tensor has shape [num_directions, hidden_size, input_size].

  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of Ri and RBi (if bidirectional). The tensor has shape [num_directions, hidden_size, hidden_size].

  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wbi, Rbi] and [WBbi, RBbi] (if bidirectional). The tensor has shape [num_directions, 2*hidden_size]. Optional: If not specified - assumed to be 0.

  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

Outputs

Between 0 and 2 outputs.

  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • T1 tensor(int32): Constrain seq_lens to integer tensor.

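To make the default equation Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) concrete, here is a numpy sketch of a forward, single-direction pass with f = Tanh and the layout-0 shapes listed above; it is a reference illustration, not the implementation used by a runtime such as onnxruntime.

    import numpy as np

    def rnn_forward_ref(X, W, R, B, initial_h=None):
        # X: [seq_length, batch_size, input_size], W: [1, hidden_size, input_size]
        # R: [1, hidden_size, hidden_size],        B: [1, 2*hidden_size]
        seq_length, batch_size, _ = X.shape
        hidden_size = W.shape[1]
        Wi, Ri = W[0], R[0]
        Wbi, Rbi = np.split(B[0], 2)
        Ht = np.zeros((batch_size, hidden_size), np.float32) if initial_h is None else initial_h[0]
        Y = np.zeros((seq_length, 1, batch_size, hidden_size), np.float32)
        for t in range(seq_length):
            Ht = np.tanh(X[t] @ Wi.T + Ht @ Ri.T + Wbi + Rbi)
            Y[t, 0] = Ht
        return Y, Ht[None]    # Y and Y_h, each with a num_directions axis of 1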

OnnxRNN_1#

class skl2onnx.algebra.onnx_ops.OnnxRNN_1(*args, **kwargs)#

Version

Onnx name: RNN

This version of the operator has been available since version 1.

Summary

Computes a one-layer simple RNN. This operator is usually supported via some custom implementation such as CuDNN.

Notations:

X - input tensor

+

i - input gate

+

t - time step (t-1 means previous time step)

+

Wi - W parameter weight matrix for input gate

+

Ri - R recurrence weight matrix for input gate

+

Wbi - W parameter bias vector for input gate

+

Rbi - R parameter bias vector for input gate

+

WBi - W parameter weight matrix for backward input gate

+

RBi - R recurrence weight matrix for backward input gate

+

WBbi - WR bias vectors for backward input gate

+

RBbi - RR bias vectors for backward input gate

+

H - Hidden state

+

num_directions - 2 if direction == bidirectional else 1

+

Activation functions:

+
+

Relu(x) - max(0, x)

+

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

+

Sigmoid(x) - 1/(1 + e^{-x})

+

(NOTE: Below are optional)

+

Affine(x) - alpha*x + beta

+

LeakyRelu(x) - x if x >= 0 else alpha * x

+

ThresholdedRelu(x) - x if x >= alpha else 0

+

ScaledTanh(x) - alpha*Tanh(beta*x)

+

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

+

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

+

Softsign(x) - x/(1 + |x|)

+

Softplus(x) - log(1 + e^x)

+
+

Equations (Default: f=Tanh):

+
+
    +
  • Ht = f(Xt*(Wi^T) + Ht-1*Ri + Wbi + Rbi)

  • +
+
+

Attributes

  • activations: One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default Tanh if not specified. Default value is name: "activations" strings: "Tanh" strings: "Tanh" type: STRINGS

  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is name: "direction" s: "forward" type: STRING

  • output_sequence: The sequence output for the hidden is optional if 0. Default 0. Default value is name: "output_sequence" i: 0 type: INT

Inputs

+

Between 3 and 6 inputs.

+
    +
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • +
  • W (heterogeneous)T: The weight tensor for input gate. Concatenation of Wi and WBi (if bidirectional). The tensor has shape [num_directions, hidden_size, input_size].

  • +
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of Ri and RBi (if bidirectional). The tensor has shape [num_directions, hidden_size, hidden_size].

  • +
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wbi, Rbi] and [WBbi, RBbi] (if bidirectional). The tensor has shape [num_directions, 2*hidden_size]. Optional: If not specified - assumed to be 0.

  • +
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • +
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Outputs

+

Between 0 and 2 outputs.

+
    +
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size]. It is optional if output_sequence is 0.

  • +
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T1 tensor(int32): Constrain seq_lens to integer tensor.


OnnxRNN_14#

class skl2onnx.algebra.onnx_ops.OnnxRNN_14(*args, **kwargs)#

Version

Onnx name: RNN

This version of the operator has been available since version 14.

Summary

Computes a one-layer simple RNN. This operator is usually supported via some custom implementation such as CuDNN.

Notations:

  • X - input tensor

  • +
  • i - input gate

  • +
  • t - time step (t-1 means previous time step)

  • +
  • Wi - W parameter weight matrix for input gate

  • +
  • Ri - R recurrence weight matrix for input gate

  • +
  • Wbi - W parameter bias vector for input gate

  • +
  • Rbi - R parameter bias vector for input gate

  • +
  • WBi - W parameter weight matrix for backward input gate

  • +
  • RBi - R recurrence weight matrix for backward input gate

  • +
  • WBbi - WR bias vectors for backward input gate

  • +
  • RBbi - RR bias vectors for backward input gate

  • +
  • H - Hidden state

  • +
  • num_directions - 2 if direction == bidirectional else 1

  • +
+

Activation functions:

+
    +
  • Relu(x) - max(0, x)

  • +
  • Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

  • +
  • Sigmoid(x) - 1/(1 + e^{-x})

  • +
+

NOTE: Below are optional

+
    +
  • Affine(x) - alpha*x + beta

  • +
  • LeakyRelu(x) - x if x >= 0 else alpha * x

  • +
  • ThresholdedRelu(x) - x if x >= alpha else 0

  • +
  • ScaledTanh(x) - alpha*Tanh(beta*x)

  • +
  • HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

  • +
  • Elu(x) - x if x >= 0 else alpha*(e^x - 1)

  • +
  • Softsign(x) - x/(1 + |x|)

  • +
  • Softplus(x) - log(1 + e^x)

  • +
+

Equations (Default: f=Tanh):

+
    +
  • Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)

  • +
+

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

Attributes

  • activations: One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default Tanh if not specified. Default value is name: "activations" strings: "Tanh" strings: "Tanh" type: STRINGS

  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is name: "direction" s: "forward" type: STRING

  • layout: The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size]. Default value is name: "layout" i: 0 type: INT

Inputs

+

Between 3 and 6 inputs.

+
    +
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • +
  • W (heterogeneous)T: The weight tensor for input gate. Concatenation of Wi and WBi (if bidirectional). The tensor has shape [num_directions, hidden_size, input_size].

  • +
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of Ri and RBi (if bidirectional). The tensor has shape [num_directions, hidden_size, hidden_size].

  • +
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wbi, Rbi] and [WBbi, RBbi] (if bidirectional). The tensor has shape [num_directions, 2*hidden_size]. Optional: If not specified - assumed to be 0.

  • +
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • +
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Outputs

+

Between 0 and 2 outputs.

+
    +
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • +
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T1 tensor(int32): Constrain seq_lens to integer tensor.


OnnxRNN_7#

class skl2onnx.algebra.onnx_ops.OnnxRNN_7(*args, **kwargs)#

Version

Onnx name: RNN

This version of the operator has been available since version 7.

Summary

Computes a one-layer simple RNN. This operator is usually supported via some custom implementation such as CuDNN.

Notations:

X - input tensor

+

i - input gate

+

t - time step (t-1 means previous time step)

+

Wi - W parameter weight matrix for input gate

+

Ri - R recurrence weight matrix for input gate

+

Wbi - W parameter bias vector for input gate

+

Rbi - R parameter bias vector for input gate

+

WBi - W parameter weight matrix for backward input gate

+

RBi - R recurrence weight matrix for backward input gate

+

WBbi - WR bias vectors for backward input gate

+

RBbi - RR bias vectors for backward input gate

+

H - Hidden state

+

num_directions - 2 if direction == bidirectional else 1

+

Activation functions:

+
+

Relu(x) - max(0, x)

+

Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})

+

Sigmoid(x) - 1/(1 + e^{-x})

+

(NOTE: Below are optional)

+

Affine(x) - alpha*x + beta

+

LeakyRelu(x) - x if x >= 0 else alpha * x

+

ThresholdedRelu(x) - x if x >= alpha else 0

+

ScaledTanh(x) - alpha*Tanh(beta*x)

+

HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)

+

Elu(x) - x if x >= 0 else alpha*(e^x - 1)

+

Softsign(x) - x/(1 + |x|)

+

Softplus(x) - log(1 + e^x)

+
+

Equations (Default: f=Tanh):

+
+
    +
  • Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)

  • +
+
+

This operator has optional inputs/outputs. See ONNX for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument’s name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

Attributes

  • activations: One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default Tanh if not specified. Default value is name: "activations" strings: "Tanh" strings: "Tanh" type: STRINGS

  • direction: Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional. Default value is name: "direction" s: "forward" type: STRING

Inputs

+

Between 3 and 6 inputs.

+
    +
  • X (heterogeneous)T: The input sequences packed (and potentially padded) into one 3-D tensor with the shape of [seq_length, batch_size, input_size].

  • +
  • W (heterogeneous)T: The weight tensor for input gate. Concatenation of Wi and WBi (if bidirectional). The tensor has shape [num_directions, hidden_size, input_size].

  • +
  • R (heterogeneous)T: The recurrence weight tensor. Concatenation of Ri and RBi (if bidirectional). The tensor has shape [num_directions, hidden_size, hidden_size].

  • +
  • B (optional, heterogeneous)T: The bias tensor for input gate. Concatenation of [Wbi, Rbi] and [WBbi, RBbi] (if bidirectional). The tensor has shape [num_directions, 2*hidden_size]. Optional: If not specified - assumed to be 0.

  • +
  • sequence_lens (optional, heterogeneous)T1: Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length seq_length. It has shape [batch_size].

  • +
  • initial_h (optional, heterogeneous)T: Optional initial value of the hidden. If not specified - assumed to be 0. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Outputs

+

Between 0 and 2 outputs.

+
    +
  • Y (optional, heterogeneous)T: A tensor that concats all the intermediate output values of the hidden. It has shape [seq_length, num_directions, batch_size, hidden_size].

  • +
  • Y_h (optional, heterogeneous)T: The last output value of the hidden. It has shape [num_directions, batch_size, hidden_size].

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • T1 tensor(int32): Constrain seq_lens to integer tensor.


OnnxRandomNormal#

class skl2onnx.algebra.onnx_ops.OnnxRandomNormal(*args, **kwargs)#

Version

Onnx name: RandomNormal

This version of the operator has been available since version 1.

Summary

Generate a tensor with random values drawn from a normal distribution. The shape of the tensor is specified by the shape argument and the parameter of the normal distribution specified by mean and scale.

The data type is specified by the ‘dtype’ argument. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message.

Attributes

  • dtype: The data type for the elements of the output tensor. Default is TensorProto::FLOAT. Default value is name: "dtype" i: 1 type: INT

  • mean: The mean of the normal distribution. Default value is name: "mean" f: 0.0 type: FLOAT

  • scale: The standard deviation of the normal distribution. Default value is name: "scale" f: 1.0 type: FLOAT

Outputs

  • output (heterogeneous)T: Output tensor of random values drawn from normal distribution

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.
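Functionally the operator corresponds to the numpy call below (dtype 1 is TensorProto.FLOAT); this is only an analogy, the random stream produced by an ONNX runtime will differ.

    import numpy as np

    shape, mean, scale = (2, 3), 0.0, 1.0
    output = np.random.normal(loc=mean, scale=scale, size=shape).astype(np.float32)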

OnnxRandomNormalLike#

class skl2onnx.algebra.onnx_ops.OnnxRandomNormalLike(*args, **kwargs)#

Version

Onnx name: RandomNormalLike

This version of the operator has been available since version 1.

Summary

Generate a tensor with random values drawn from a normal distribution. The shape of the output tensor is copied from the shape of the input tensor, and the parameters of the normal distribution are specified by mean and scale.

The data type is specified by the ‘dtype’ argument, or copied from the input tensor if not provided. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message, and be valid as an output type.

Attributes

  • mean: The mean of the normal distribution. Default value is name: "mean" f: 0.0 type: FLOAT

  • scale: The standard deviation of the normal distribution. Default value is name: "scale" f: 1.0 type: FLOAT

Inputs

  • input (heterogeneous)T1: Input tensor to copy shape and optionally type information from.

Outputs

  • output (heterogeneous)T2: Output tensor of random values drawn from normal distribution

Type Constraints

  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.

  • T2 tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

OnnxRandomNormalLike_1#

class skl2onnx.algebra.onnx_ops.OnnxRandomNormalLike_1(*args, **kwargs)#

Version

Onnx name: RandomNormalLike

This version of the operator has been available since version 1.

Summary

Generate a tensor with random values drawn from a normal distribution. The shape of the output tensor is copied from the shape of the input tensor, and the parameters of the normal distribution are specified by mean and scale.

The data type is specified by the ‘dtype’ argument, or copied from the input tensor if not provided. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message, and be valid as an output type.

Attributes

  • mean: The mean of the normal distribution. Default value is name: "mean" f: 0.0 type: FLOAT

  • scale: The standard deviation of the normal distribution. Default value is name: "scale" f: 1.0 type: FLOAT

Inputs

  • input (heterogeneous)T1: Input tensor to copy shape and optionally type information from.

Outputs

  • output (heterogeneous)T2: Output tensor of random values drawn from normal distribution

Type Constraints

  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.

  • T2 tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

OnnxRandomNormal_1#

class skl2onnx.algebra.onnx_ops.OnnxRandomNormal_1(*args, **kwargs)#

Version

Onnx name: RandomNormal

This version of the operator has been available since version 1.

Summary

Generate a tensor with random values drawn from a normal distribution. The shape of the tensor is specified by the shape argument and the parameter of the normal distribution specified by mean and scale.

The data type is specified by the ‘dtype’ argument. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message.

Attributes

  • dtype: The data type for the elements of the output tensor. Default is TensorProto::FLOAT. Default value is name: "dtype" i: 1 type: INT

  • mean: The mean of the normal distribution. Default value is name: "mean" f: 0.0 type: FLOAT

  • scale: The standard deviation of the normal distribution. Default value is name: "scale" f: 1.0 type: FLOAT

Outputs

  • output (heterogeneous)T: Output tensor of random values drawn from normal distribution

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

OnnxRandomUniform#

class skl2onnx.algebra.onnx_ops.OnnxRandomUniform(*args, **kwargs)#

Version

Onnx name: RandomUniform

This version of the operator has been available since version 1.

Summary

Generate a tensor with random values drawn from a uniform distribution. The shape of the tensor is specified by the shape argument and the range by low and high.

The data type is specified by the ‘dtype’ argument. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message.

Attributes

  • dtype: The data type for the elements of the output tensor. If not specified, default is TensorProto::FLOAT. Default value is name: "dtype" i: 1 type: INT

  • high: Upper boundary of the output values. Default value is name: "high" f: 1.0 type: FLOAT

  • low: Lower boundary of the output values. Default value is name: "low" f: 0.0 type: FLOAT

Outputs

  • output (heterogeneous)T: Output tensor of random values drawn from uniform distribution

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

OnnxRandomUniformLike#

class skl2onnx.algebra.onnx_ops.OnnxRandomUniformLike(*args, **kwargs)#

Version

Onnx name: RandomUniformLike

This version of the operator has been available since version 1.

Summary

Generate a tensor with random values drawn from a uniform distribution. The shape of the output tensor is copied from the shape of the input tensor, and the parameters of the uniform distribution are specified by low and high.

The data type is specified by the ‘dtype’ argument, or copied from the input tensor if not provided. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message and be valid as an output type.

Attributes

  • high: Upper boundary of the output values. Default value is name: "high" f: 1.0 type: FLOAT

  • low: Lower boundary of the output values. Default value is name: "low" f: 0.0 type: FLOAT

Inputs

  • input (heterogeneous)T1: Input tensor to copy shape and optionally type information from.

Outputs

  • output (heterogeneous)T2: Output tensor of random values drawn from uniform distribution

Type Constraints

  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.

  • T2 tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

OnnxRandomUniformLike_1#

class skl2onnx.algebra.onnx_ops.OnnxRandomUniformLike_1(*args, **kwargs)#

Version

Onnx name: RandomUniformLike

This version of the operator has been available since version 1.

Summary

Generate a tensor with random values drawn from a uniform distribution. The shape of the output tensor is copied from the shape of the input tensor, and the parameters of the uniform distribution are specified by low and high.

The data type is specified by the ‘dtype’ argument, or copied from the input tensor if not provided. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message and be valid as an output type.

Attributes

  • high: Upper boundary of the output values. Default value is name: "high" f: 1.0 type: FLOAT

  • low: Lower boundary of the output values. Default value is name: "low" f: 0.0 type: FLOAT

Inputs

  • input (heterogeneous)T1: Input tensor to copy shape and optionally type information from.

Outputs

  • output (heterogeneous)T2: Output tensor of random values drawn from uniform distribution

Type Constraints

  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.

  • T2 tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

OnnxRandomUniform_1#

class skl2onnx.algebra.onnx_ops.OnnxRandomUniform_1(*args, **kwargs)#

Version

Onnx name: RandomUniform

This version of the operator has been available since version 1.

Summary

Generate a tensor with random values drawn from a uniform distribution. The shape of the tensor is specified by the shape argument and the range by low and high.

The data type is specified by the ‘dtype’ argument. The ‘dtype’ argument must be one of the data types specified in the ‘DataType’ enum field in the TensorProto message.

Attributes

  • dtype: The data type for the elements of the output tensor. If not specified, default is TensorProto::FLOAT. Default value is name: "dtype" i: 1 type: INT

  • high: Upper boundary of the output values. Default value is name: "high" f: 1.0 type: FLOAT

  • low: Lower boundary of the output values. Default value is name: "low" f: 0.0 type: FLOAT

Outputs

  • output (heterogeneous)T: Output tensor of random values drawn from uniform distribution

Type Constraints

  • T tensor(float16), tensor(float), tensor(double): Constrain output types to float tensors.

OnnxRange#

class skl2onnx.algebra.onnx_ops.OnnxRange(*args, **kwargs)#

Version

Onnx name: Range

This version of the operator has been available since version 11.

Summary

Generate a tensor containing a sequence of numbers that begin at start and extends by increments of delta up to limit (exclusive).

The number of elements in the output of range is computed as below:

    number_of_elements = max( ceil( (limit - start) / delta ) , 0 )

The pseudocode determining the contents of the output is shown below:

    for(int i=0; i<number_of_elements; ++i) {
      output[i] = start + (i * delta);
    }

Example 1

    Inputs: start = 3, limit = 9, delta = 3
    Output: [3, 6]

Example 2

    Inputs: start = 10, limit = 4, delta = -2
    Output: [10, 8, 6]

Inputs

  • start (heterogeneous)T: Scalar. First entry for the range of output values.

  • limit (heterogeneous)T: Scalar. Exclusive upper limit for the range of output values.

  • delta (heterogeneous)T: Scalar. Value to step by.

Outputs

  • output (heterogeneous)T: A 1-D tensor with same type as the inputs containing generated range of values.

Type Constraints

  • T tensor(float), tensor(double), tensor(int16), tensor(int32), tensor(int64): Constrain input types to common numeric type tensors.
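The two examples above follow directly from the pseudocode; the same rule as a small numpy sketch:

    import numpy as np

    def range_ref(start, limit, delta):
        number_of_elements = max(int(np.ceil((limit - start) / delta)), 0)
        return np.array([start + i * delta for i in range(number_of_elements)])

    print(range_ref(3, 9, 3))     # [3 6]
    print(range_ref(10, 4, -2))   # [10  8  6]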

OnnxRange_11#

class skl2onnx.algebra.onnx_ops.OnnxRange_11(*args, **kwargs)#

Version

Onnx name: Range

This version of the operator has been available since version 11.

Summary

Generate a tensor containing a sequence of numbers that begin at start and extends by increments of delta up to limit (exclusive).

The number of elements in the output of range is computed as below:

    number_of_elements = max( ceil( (limit - start) / delta ) , 0 )

The pseudocode determining the contents of the output is shown below:

    for(int i=0; i<number_of_elements; ++i) {
      output[i] = start + (i * delta);
    }

Example 1

    Inputs: start = 3, limit = 9, delta = 3
    Output: [3, 6]

Example 2

    Inputs: start = 10, limit = 4, delta = -2
    Output: [10, 8, 6]

Inputs

  • start (heterogeneous)T: Scalar. First entry for the range of output values.

  • limit (heterogeneous)T: Scalar. Exclusive upper limit for the range of output values.

  • delta (heterogeneous)T: Scalar. Value to step by.

Outputs

  • output (heterogeneous)T: A 1-D tensor with same type as the inputs containing generated range of values.

Type Constraints

  • T tensor(float), tensor(double), tensor(int16), tensor(int32), tensor(int64): Constrain input types to common numeric type tensors.

OnnxReciprocal#

class skl2onnx.algebra.onnx_ops.OnnxReciprocal(*args, **kwargs)#

Version
Onnx name: Reciprocal
This version of the operator has been available since version 13.

Summary
Reciprocal takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the reciprocal function, y = 1/x, is applied to the tensor elementwise.

Inputs
  • X (heterogeneous)T: Input tensor

Outputs
  • Y (heterogeneous)T: Output tensor

Type Constraints
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

OnnxReciprocal_1#

class skl2onnx.algebra.onnx_ops.OnnxReciprocal_1(*args, **kwargs)#

Version
Onnx name: Reciprocal
This version of the operator has been available since version 1.

Summary
Reciprocal takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the reciprocal function, y = 1/x, is applied to the tensor elementwise.

Inputs
  • X (heterogeneous)T: Input tensor

Outputs
  • Y (heterogeneous)T: Output tensor

Type Constraints
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxReciprocal_13#

class skl2onnx.algebra.onnx_ops.OnnxReciprocal_13(*args, **kwargs)#

Version
Onnx name: Reciprocal
This version of the operator has been available since version 13.

Summary
Reciprocal takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the reciprocal function, y = 1/x, is applied to the tensor elementwise.

Inputs
  • X (heterogeneous)T: Input tensor

Outputs
  • Y (heterogeneous)T: Output tensor

Type Constraints
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

OnnxReciprocal_6#

class skl2onnx.algebra.onnx_ops.OnnxReciprocal_6(*args, **kwargs)#

Version
Onnx name: Reciprocal
This version of the operator has been available since version 6.

Summary
Reciprocal takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the reciprocal function, y = 1/x, is applied to the tensor elementwise.

Inputs
  • X (heterogeneous)T: Input tensor

Outputs
  • Y (heterogeneous)T: Output tensor

Type Constraints
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

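As a minimal sketch of how these algebra classes are typically used (assuming skl2onnx and onnxruntime are installed; the opset value is an arbitrary choice), a single Reciprocal node can be turned into a standalone ONNX model and evaluated:

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxReciprocal
from onnxruntime import InferenceSession

X = np.array([[1.0, 2.0], [4.0, 0.5]], dtype=np.float32)

# One-node graph computing Y = 1 / X; opset 13 is an assumption, pick the one you target.
op = OnnxReciprocal('X', op_version=13, output_names=['Y'])
onx = op.to_onnx({'X': X}, target_opset=13)

sess = InferenceSession(onx.SerializeToString())
print(sess.run(None, {'X': X})[0])  # elementwise reciprocal of X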

OnnxReduceL1#

class skl2onnx.algebra.onnx_ops.OnnxReduceL1(*args, **kwargs)#

Version
Onnx name: ReduceL1
This version of the operator has been available since version 18.

Summary
Computes the L1 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxReduceL1_1#

class skl2onnx.algebra.onnx_ops.OnnxReduceL1_1(*args, **kwargs)#

Version
Onnx name: ReduceL1
This version of the operator has been available since version 1.

Summary
Computes the L1 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceL1_11#

class skl2onnx.algebra.onnx_ops.OnnxReduceL1_11(*args, **kwargs)#

Version
Onnx name: ReduceL1
This version of the operator has been available since version 11.

Summary
Computes the L1 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceL1_13#

class skl2onnx.algebra.onnx_ops.OnnxReduceL1_13(*args, **kwargs)#

Version
Onnx name: ReduceL1
This version of the operator has been available since version 13.

Summary
Computes the L1 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxReduceL1_18#

class skl2onnx.algebra.onnx_ops.OnnxReduceL1_18(*args, **kwargs)#

Version
Onnx name: ReduceL1
This version of the operator has been available since version 18.

Summary
Computes the L1 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

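A plain-numpy sketch of the reduction and of the keepdims contrast mentioned above (this illustrates the semantics only, not the converter API):

import numpy as np

data = np.array([[1.0, -2.0], [3.0, -4.0]], dtype=np.float32)

# ReduceL1 over axis 1 with keepdims=1 (the ONNX default) keeps the rank...
keep = np.sum(np.abs(data), axis=1, keepdims=True)     # shape (2, 1)
# ...while keepdims=0 prunes the reduced dimension, which is numpy's default.
pruned = np.sum(np.abs(data), axis=1, keepdims=False)  # shape (2,)

print(keep.shape, pruned.shape)  # (2, 1) (2,)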

OnnxReduceL2#

class skl2onnx.algebra.onnx_ops.OnnxReduceL2(*args, **kwargs)#

Version
Onnx name: ReduceL2
This version of the operator has been available since version 18.

Summary
Computes the L2 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxReduceL2_1#

class skl2onnx.algebra.onnx_ops.OnnxReduceL2_1(*args, **kwargs)#

Version
Onnx name: ReduceL2
This version of the operator has been available since version 1.

Summary
Computes the L2 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceL2_11#

class skl2onnx.algebra.onnx_ops.OnnxReduceL2_11(*args, **kwargs)#

Version
Onnx name: ReduceL2
This version of the operator has been available since version 11.

Summary
Computes the L2 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceL2_13#

class skl2onnx.algebra.onnx_ops.OnnxReduceL2_13(*args, **kwargs)#

Version
Onnx name: ReduceL2
This version of the operator has been available since version 13.

Summary
Computes the L2 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxReduceL2_18#

class skl2onnx.algebra.onnx_ops.OnnxReduceL2_18(*args, **kwargs)#

Version
Onnx name: ReduceL2
This version of the operator has been available since version 18.

Summary
Computes the L2 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

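The same keepdims convention applies to ReduceL2; as a quick numpy reference for what the node computes:

import numpy as np

data = np.array([[3.0, 4.0], [6.0, 8.0]], dtype=np.float32)
# L2 norm along axis 1 with keepdims=1, mirroring the ONNX default.
l2 = np.sqrt(np.sum(np.square(data), axis=1, keepdims=True))
print(l2)  # [[ 5.] [10.]]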

OnnxReduceLogSum#

class skl2onnx.algebra.onnx_ops.OnnxReduceLogSum(*args, **kwargs)#

Version
Onnx name: ReduceLogSum
This version of the operator has been available since version 18.

Summary
Computes the log sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

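In numpy terms the node computes the logarithm of the sum along the selected axes; a tiny sketch:

import numpy as np

data = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)
log_sum = np.log(np.sum(data, axis=1, keepdims=True))
print(log_sum)  # [[1.7917595]] == log(6)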

OnnxReduceLogSumExp#

class skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp(*args, **kwargs)#

Version
Onnx name: ReduceLogSumExp
This version of the operator has been available since version 18.

Summary
Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxReduceLogSumExp_1#

class skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp_1(*args, **kwargs)#

Version
Onnx name: ReduceLogSumExp
This version of the operator has been available since version 1.

Summary
Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceLogSumExp_11#

class skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp_11(*args, **kwargs)#

Version
Onnx name: ReduceLogSumExp
This version of the operator has been available since version 11.

Summary
Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceLogSumExp_13#

class skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp_13(*args, **kwargs)#

Version
Onnx name: ReduceLogSumExp
This version of the operator has been available since version 13.

Summary
Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxReduceLogSumExp_18#

class skl2onnx.algebra.onnx_ops.OnnxReduceLogSumExp_18(*args, **kwargs)#

Version
Onnx name: ReduceLogSumExp
This version of the operator has been available since version 18.

Summary
Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

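ReduceLogSumExp corresponds to log(sum(exp(data))) along the chosen axes. The direct transcription below is fine for small values; a numerically robust implementation would subtract the per-axis maximum first (as scipy.special.logsumexp does).

import numpy as np

data = np.array([[0.0, 1.0, 2.0]], dtype=np.float32)
lse = np.log(np.sum(np.exp(data), axis=1, keepdims=True))
print(lse)  # [[2.4076061]]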

OnnxReduceLogSum_1#

class skl2onnx.algebra.onnx_ops.OnnxReduceLogSum_1(*args, **kwargs)#

Version
Onnx name: ReduceLogSum
This version of the operator has been available since version 1.

Summary
Computes the log sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceLogSum_11#

class skl2onnx.algebra.onnx_ops.OnnxReduceLogSum_11(*args, **kwargs)#

Version
Onnx name: ReduceLogSum
This version of the operator has been available since version 11.

Summary
Computes the log sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceLogSum_13#

class skl2onnx.algebra.onnx_ops.OnnxReduceLogSum_13(*args, **kwargs)#

Version
Onnx name: ReduceLogSum
This version of the operator has been available since version 13.

Summary
Computes the log sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxReduceLogSum_18#

class skl2onnx.algebra.onnx_ops.OnnxReduceLogSum_18(*args, **kwargs)#

Version
Onnx name: ReduceLogSum
This version of the operator has been available since version 18.

Summary
Computes the log sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxReduceMax#

class skl2onnx.algebra.onnx_ops.OnnxReduceMax(*args, **kwargs)#

Version
Onnx name: ReduceMax
This version of the operator has been available since version 18.

Summary
Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

OnnxReduceMax_1#

class skl2onnx.algebra.onnx_ops.OnnxReduceMax_1(*args, **kwargs)#

Version
Onnx name: ReduceMax
This version of the operator has been available since version 1.

Summary
Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceMax_11#

class skl2onnx.algebra.onnx_ops.OnnxReduceMax_11(*args, **kwargs)#

Version
Onnx name: ReduceMax
This version of the operator has been available since version 11.

Summary
Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceMax_12#

class skl2onnx.algebra.onnx_ops.OnnxReduceMax_12(*args, **kwargs)#

Version
Onnx name: ReduceMax
This version of the operator has been available since version 12.

Summary
Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

OnnxReduceMax_13#

class skl2onnx.algebra.onnx_ops.OnnxReduceMax_13(*args, **kwargs)#

Version
Onnx name: ReduceMax
This version of the operator has been available since version 13.

Summary
Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

OnnxReduceMax_18#

class skl2onnx.algebra.onnx_ops.OnnxReduceMax_18(*args, **kwargs)#

Version
Onnx name: ReduceMax
This version of the operator has been available since version 18.

Summary
Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

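Starting with opset 18 the reduction axes are passed as a second input rather than as an attribute. The sketch below (an assumption about the typical algebra usage, with arbitrary data) builds such a node and runs it with onnxruntime.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxReduceMax_18
from onnxruntime import InferenceSession

X = np.array([[1.0, 5.0], [7.0, 3.0]], dtype=np.float32)
axes = np.array([1], dtype=np.int64)  # axes is now an input, not an attribute

op = OnnxReduceMax_18('X', axes, keepdims=1, output_names=['Y'])
onx = op.to_onnx({'X': X}, target_opset=18)

sess = InferenceSession(onx.SerializeToString())
print(sess.run(None, {'X': X})[0])  # [[5.] [7.]]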

OnnxReduceMean#

class skl2onnx.algebra.onnx_ops.OnnxReduceMean(*args, **kwargs)#

Version
Onnx name: ReduceMean
This version of the operator has been available since version 18.

Summary
Computes the mean of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxReduceMean_1#

class skl2onnx.algebra.onnx_ops.OnnxReduceMean_1(*args, **kwargs)#

Version
Onnx name: ReduceMean
This version of the operator has been available since version 1.

Summary
Computes the mean of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceMean_11#

class skl2onnx.algebra.onnx_ops.OnnxReduceMean_11(*args, **kwargs)#

Version
Onnx name: ReduceMean
This version of the operator has been available since version 11.

Summary
Computes the mean of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceMean_13#

class skl2onnx.algebra.onnx_ops.OnnxReduceMean_13(*args, **kwargs)#

Version
Onnx name: ReduceMean
This version of the operator has been available since version 13.

Summary
Computes the mean of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxReduceMean_18#

class skl2onnx.algebra.onnx_ops.OnnxReduceMean_18(*args, **kwargs)#

Version
Onnx name: ReduceMean
This version of the operator has been available since version 18.

Summary
Computes the mean of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

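The interplay between an empty axes list and noop_with_empty_axes (described in the opset-18 entries above) can be pictured with a small plain-Python helper; this is only an illustration of the documented behavior, not runtime code.

import numpy as np

def reduce_mean(data, axes=None, keepdims=1, noop_with_empty_axes=0):
    # Illustration of the attribute interplay described above.
    if axes is None or len(axes) == 0:
        if noop_with_empty_axes:
            return data                     # behave like Identity
        axes = tuple(range(data.ndim))      # default: reduce over all axes
    return np.mean(data, axis=tuple(axes), keepdims=bool(keepdims))

x = np.arange(6, dtype=np.float32).reshape(2, 3)
print(reduce_mean(x))                                         # [[2.5]]
print(reduce_mean(x, axes=[], noop_with_empty_axes=1).shape)  # (2, 3)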

OnnxReduceMin#

class skl2onnx.algebra.onnx_ops.OnnxReduceMin(*args, **kwargs)#

Version
Onnx name: ReduceMin
This version of the operator has been available since version 18.

Summary
Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

OnnxReduceMin_1#

class skl2onnx.algebra.onnx_ops.OnnxReduceMin_1(*args, **kwargs)#

Version
Onnx name: ReduceMin
This version of the operator has been available since version 1.

Summary
Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceMin_11#

class skl2onnx.algebra.onnx_ops.OnnxReduceMin_11(*args, **kwargs)#

Version
Onnx name: ReduceMin
This version of the operator has been available since version 11.

Summary
Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

OnnxReduceMin_12#

class skl2onnx.algebra.onnx_ops.OnnxReduceMin_12(*args, **kwargs)#

Version
Onnx name: ReduceMin
This version of the operator has been available since version 12.

Summary
Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

OnnxReduceMin_13#

class skl2onnx.algebra.onnx_ops.OnnxReduceMin_13(*args, **kwargs)#

Version
Onnx name: ReduceMin
This version of the operator has been available since version 13.

Summary
Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT

Inputs
  • data (heterogeneous)T: An input tensor.

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

OnnxReduceMin_18#

class skl2onnx.algebra.onnx_ops.OnnxReduceMin_18(*args, **kwargs)#

Version
Onnx name: ReduceMin
This version of the operator has been available since version 18.

Summary
Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes
  • keepdims: Keep the reduced dimension or not, default 1 means keep the reduced dimension. Default value is name: "keepdims" i: 1 type: INT
  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs
Between 1 and 2 inputs.
  • data (heterogeneous)T: An input tensor.
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs
  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(int8): Constrain input and output types to high-precision and 8 bit numeric tensors.

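ReduceMin follows the same pattern, including the 8-bit tensor support added in opset 12; a short numpy sketch of keepdims=0 pruning the reduced axis:

import numpy as np

q = np.array([[12, -3], [7, 5]], dtype=np.int8)
# keepdims=0 prunes the reduced axis; int8 inputs are allowed from opset 12 on.
print(np.min(q, axis=0, keepdims=False))  # [ 7 -3], shape (2,)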

OnnxReduceProd#

class skl2onnx.algebra.onnx_ops.OnnxReduceProd(*args, **kwargs)#

Version

Onnx name: ReduceProd

This version of the operator has been available since version 18.

Summary

Computes the product of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is name: "keepdims" i: 1 type: INT

  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs

Between 1 and 2 inputs.

  • data (heterogeneous)T: An input tensor.

  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

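A small sketch, offered as an illustration rather than as library documentation, of how keepdims changes the output rank (it assumes numpy, onnxruntime and opset 18):

import numpy as np
import onnxruntime as rt
from skl2onnx.algebra.onnx_ops import OnnxReduceProd

X = np.arange(1, 7, dtype=np.float32).reshape((2, 3))
axes = np.array([0], dtype=np.int64)

for keepdims in (1, 0):
    node = OnnxReduceProd('X', axes, keepdims=keepdims, op_version=18,
                          output_names=['reduced'])
    onx = node.to_onnx({'X': X}, target_opset=18)
    sess = rt.InferenceSession(onx.SerializeToString(),
                               providers=['CPUExecutionProvider'])
    out = sess.run(None, {'X': X})[0]
    print(keepdims, out.shape, out.ravel())  # shapes (1, 3) then (3,), values [4. 10. 18.]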
OnnxReduceProd_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReduceProd_1(*args, **kwargs)#
+

Version

+

Onnx name: ReduceProd

+

This version of the operator has been available since +version 1.

+

Summary

+

Computes the product of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

+

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to +False instead of True.

+

Attributes

+
    +
  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)T: Reduced output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxReduceProd_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReduceProd_11(*args, **kwargs)#
+

Version

+

Onnx name: ReduceProd

+

This version of the operator has been available since +version 11.

+

Summary

+

Computes the product of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

+

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to +False instead of True.

+

Attributes

+
    +
  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)T: Reduced output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxReduceProd_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReduceProd_13(*args, **kwargs)#
+

Version

+

Onnx name: ReduceProd

+

This version of the operator has been available since +version 13.

+

Summary

+

Computes the product of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

+

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to +False instead of True.

+

Attributes

+
    +
  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)T: Reduced output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxReduceProd_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReduceProd_18(*args, **kwargs)#
+

Version

+

Onnx name: ReduceProd

+

This version of the operator has been available since +version 18.

+

Summary

+

Computes the product of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

+

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to +False instead of True.

+

Attributes

+
    +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
  • noop_with_empty_axes: Defines behavior if ‘axes’ is empty. Default behavior with ‘false’ is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor. Default value is +name: "noop_with_empty_axes" i: 0 type: INT

  • +
+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if ‘noop_with_empty_axes’ is false, else act as an Identity op when ‘noop_with_empty_axes’ is true. Accepted range is [-r, r-1] where r = rank(data).

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)T: Reduced output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxReduceSum#

class skl2onnx.algebra.onnx_ops.OnnxReduceSum(*args, **kwargs)#

Version

Onnx name: ReduceSum

This version of the operator has been available since version 13.

Summary

Computes the sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is name: "keepdims" i: 1 type: INT

  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs

Between 1 and 2 inputs.

  • data (heterogeneous)T: An input tensor.

  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

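As an illustrative sketch (assuming opset 13, numpy and onnxruntime), omitting the optional 'axes' input while noop_with_empty_axes keeps its default of 0 reduces over every dimension:

import numpy as np
import onnxruntime as rt
from skl2onnx.algebra.onnx_ops import OnnxReduceSum

X = np.ones((2, 3), dtype=np.float32)
# no 'axes' input: with noop_with_empty_axes left at 0, all axes are reduced
node = OnnxReduceSum('X', op_version=13, output_names=['total'])
onx = node.to_onnx({'X': X}, target_opset=13)
sess = rt.InferenceSession(onx.SerializeToString(), providers=['CPUExecutionProvider'])
print(sess.run(None, {'X': X})[0])  # [[6.]] -- keepdims defaults to 1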
OnnxReduceSumSquare#

class skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare(*args, **kwargs)#

Version

Onnx name: ReduceSumSquare

This version of the operator has been available since version 18.

Summary

Computes the sum square of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is name: "keepdims" i: 1 type: INT

  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs

Between 1 and 2 inputs.

  • data (heterogeneous)T: An input tensor.

  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

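A quick illustrative check (assuming opset 18, numpy and onnxruntime) that the operator matches (x ** 2).sum(axis=...) in numpy:

import numpy as np
import onnxruntime as rt
from skl2onnx.algebra.onnx_ops import OnnxReduceSumSquare

X = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
axes = np.array([1], dtype=np.int64)

node = OnnxReduceSumSquare('X', axes, keepdims=0, op_version=18, output_names=['ss'])
onx = node.to_onnx({'X': X}, target_opset=18)
sess = rt.InferenceSession(onx.SerializeToString(), providers=['CPUExecutionProvider'])
print(sess.run(None, {'X': X})[0])  # [ 5. 25.]
print((X ** 2).sum(axis=1))         # same values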
OnnxReduceSumSquare_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare_1(*args, **kwargs)#
+

Version

+

Onnx name: ReduceSumSquare

+

This version of the operator has been available since +version 1.

+

Summary

+

Computes the sum square of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

+

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to +False instead of True.

+

Attributes

+
    +
  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)T: Reduced output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxReduceSumSquare_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare_11(*args, **kwargs)#
+

Version

+

Onnx name: ReduceSumSquare

+

This version of the operator has been available since +version 11.

+

Summary

+

Computes the sum square of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

+

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to +False instead of True.

+

Attributes

+
    +
  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)T: Reduced output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxReduceSumSquare_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare_13(*args, **kwargs)#
+

Version

+

Onnx name: ReduceSumSquare

+

This version of the operator has been available since +version 13.

+

Summary

+

Computes the sum square of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

+

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to +False instead of True.

+

Attributes

+
    +
  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)T: Reduced output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxReduceSumSquare_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReduceSumSquare_18(*args, **kwargs)#
+

Version

+

Onnx name: ReduceSumSquare

+

This version of the operator has been available since +version 18.

+

Summary

+

Computes the sum square of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

+

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to +False instead of True.

+

Attributes

+
    +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
  • noop_with_empty_axes: Defines behavior if ‘axes’ is empty. Default behavior with ‘false’ is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor. Default value is +name: "noop_with_empty_axes" i: 0 type: INT

  • +
+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if ‘noop_with_empty_axes’ is false, else act as an Identity op when ‘noop_with_empty_axes’ is true. Accepted range is [-r, r-1] where r = rank(data).

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)T: Reduced output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxReduceSum_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReduceSum_1(*args, **kwargs)#
+

Version

+

Onnx name: ReduceSum

+

This version of the operator has been available since +version 1.

+

Summary

+

Computes the sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

+

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to +False instead of True.

+

Attributes

+
    +
  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)T: Reduced output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxReduceSum_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReduceSum_11(*args, **kwargs)#
+

Version

+

Onnx name: ReduceSum

+

This version of the operator has been available since +version 11.

+

Summary

+

Computes the sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

+

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to +False instead of True.

+

Attributes

+
    +
  • +
  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reduced (heterogeneous)T: Reduced output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

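For contrast, a hedged sketch of the pre-opset-13 calling convention, where 'axes' is an attribute rather than an input (the opset-13 classes take it as a second input); it assumes opset 11 is still accepted by the installed onnx and onnxruntime:

import numpy as np
import onnxruntime as rt
from skl2onnx.algebra.onnx_ops import OnnxReduceSum_11

X = np.arange(6, dtype=np.float32).reshape((2, 3))
# axes is an attribute here, not a second input
node = OnnxReduceSum_11('X', axes=[1], keepdims=0, output_names=['row_sum'])
onx = node.to_onnx({'X': X}, target_opset=11)
sess = rt.InferenceSession(onx.SerializeToString(), providers=['CPUExecutionProvider'])
print(sess.run(None, {'X': X})[0])  # [ 3. 12.]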
OnnxReduceSum_13#

class skl2onnx.algebra.onnx_ops.OnnxReduceSum_13(*args, **kwargs)#

Version

Onnx name: ReduceSum

This version of the operator has been available since version 13.

Summary

Computes the sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.

The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

Attributes

  • keepdims: Keep the reduced dimension or not, default 1 means keep reduced dimension. Default value is name: "keepdims" i: 1 type: INT

  • noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor would be equivalent to the input tensor. Default value is name: "noop_with_empty_axes" i: 0 type: INT

Inputs

Between 1 and 2 inputs.

  • data (heterogeneous)T: An input tensor.

  • axes (optional, heterogeneous)tensor(int64): Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data).

Outputs

  • reduced (heterogeneous)T: Reduced output tensor.

Type Constraints

  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

OnnxRelu#

class skl2onnx.algebra.onnx_ops.OnnxRelu(*args, **kwargs)#

Version

Onnx name: Relu

This version of the operator has been available since version 14.

Summary

Relu takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the rectified linear function, y = max(0, x), is applied to the tensor elementwise.

Inputs

  • X (heterogeneous)T: Input tensor

Outputs

  • Y (heterogeneous)T: Output tensor

Type Constraints

  • T tensor(float), tensor(int32), tensor(int8), tensor(int16), tensor(int64), tensor(float16), tensor(double), tensor(bfloat16): Constrain input and output types to signed numeric tensors.

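A minimal illustrative sketch (assuming numpy, onnxruntime and opset 14) comparing the operator with numpy's maximum(0, x):

import numpy as np
import onnxruntime as rt
from skl2onnx.algebra.onnx_ops import OnnxRelu

X = np.array([[-1.5, 0.0, 2.5]], dtype=np.float32)
node = OnnxRelu('X', op_version=14, output_names=['Y'])
onx = node.to_onnx({'X': X}, target_opset=14)
sess = rt.InferenceSession(onx.SerializeToString(), providers=['CPUExecutionProvider'])
print(sess.run(None, {'X': X})[0])  # [[0.  0.  2.5]]
print(np.maximum(X, 0))             # identical result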
OnnxRelu_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxRelu_1(*args, **kwargs)#
+

Version

+

Onnx name: Relu

+

This version of the operator has been available since +version 1.

+

Summary

+

Relu takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the rectified linear function, y = max(0, x), is applied to +the tensor elementwise.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxRelu_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxRelu_13(*args, **kwargs)#
+

Version

+

Onnx name: Relu

+

This version of the operator has been available since +version 13.

+

Summary

+

Relu takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the rectified linear function, y = max(0, x), is applied to +the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxRelu_14#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxRelu_14(*args, **kwargs)#
+

Version

+

Onnx name: Relu

+

This version of the operator has been available since +version 14.

+

Summary

+

Relu takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the rectified linear function, y = max(0, x), is applied to +the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(int32), tensor(int8), tensor(int16), tensor(int64), tensor(float16), tensor(double), tensor(bfloat16): Constrain input and output types to signed numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxRelu_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxRelu_6(*args, **kwargs)#
+

Version

+

Onnx name: Relu

+

This version of the operator has been available since +version 6.

+

Summary

+

Relu takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the rectified linear function, y = max(0, x), is applied to +the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxReshape#

class skl2onnx.algebra.onnx_ops.OnnxReshape(*args, **kwargs)#

Version

Onnx name: Reshape

This version of the operator has been available since version 14.

Summary

Reshape the input tensor similar to numpy.reshape. First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. At most one dimension of the new shape can be -1. In this case, the value is inferred from the size of the tensor and the remaining dimensions. A dimension could also be 0, in which case the actual dimension value is unchanged (i.e. taken from the input tensor). If 'allowzero' is set, and the new shape includes 0, the dimension will be set explicitly to zero (i.e. not taken from the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. The input tensor's shape and the output tensor's shape are required to have the same number of elements.

If the attribute 'allowzero' is set, it is invalid for the specified shape to contain both a zero value and -1, as the value of the dimension corresponding to -1 cannot be determined uniquely.

Attributes

  • allowzero: (Optional) By default, when any value in the 'shape' input is equal to zero the corresponding dimension value is copied from the input tensor dynamically. allowzero=1 indicates that if any value in the 'shape' input is set to zero, the zero value is honored, similar to NumPy. Default value is name: "allowzero" i: 0 type: INT

Inputs

  • data (heterogeneous)T: An input tensor.

  • shape (heterogeneous)tensor(int64): Specified shape for output.

Outputs

  • reshaped (heterogeneous)T: Reshaped data.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

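An illustrative sketch of the 0 and -1 conventions described above (assuming numpy, onnxruntime and opset 14): 0 copies the corresponding input dimension and the single -1 is inferred from the remaining element count.

import numpy as np
import onnxruntime as rt
from skl2onnx.algebra.onnx_ops import OnnxReshape

X = np.arange(12, dtype=np.float32).reshape((2, 6))
shape = np.array([0, -1, 2], dtype=np.int64)  # keep dim 0, infer dim 1

node = OnnxReshape('X', shape, op_version=14, output_names=['Y'])
onx = node.to_onnx({'X': X}, target_opset=14)
sess = rt.InferenceSession(onx.SerializeToString(), providers=['CPUExecutionProvider'])
print(sess.run(None, {'X': X})[0].shape)  # (2, 3, 2)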
OnnxReshape_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReshape_1(*args, **kwargs)#
+

Version

+

Onnx name: Reshape

+

This version of the operator has been available since +version 1.

+

Summary

+

Reshape the input tensor similar to numpy.reshape. +It takes a tensor as input and an argument shape. It outputs the reshaped tensor. +At most one dimension of the new shape can be -1. In this case, the value is +inferred from the size of the tensor and the remaining dimensions. A dimension +could also be 0, in which case the actual dimension value is unchanged (i.e. taken +from the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. +The input tensor’s shape and the output tensor’s shape are required to have the same number of elements.

+

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • reshaped (heterogeneous)T: Reshaped data.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxReshape_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReshape_13(*args, **kwargs)#
+

Version

+

Onnx name: Reshape

+

This version of the operator has been available since +version 13.

+

Summary

+

Reshape the input tensor similar to numpy.reshape. +First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. +At most one dimension of the new shape can be -1. In this case, the value is +inferred from the size of the tensor and the remaining dimensions. A dimension +could also be 0, in which case the actual dimension value is unchanged (i.e. taken +from the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. +The input tensor’s shape and the output tensor’s shape are required to have the same number of elements.

+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
  • shape (heterogeneous)tensor(int64): Specified shape for output.

  • +
+

Outputs

+
    +
  • reshaped (heterogeneous)T: Reshaped data.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxReshape_14#

class skl2onnx.algebra.onnx_ops.OnnxReshape_14(*args, **kwargs)#

Version

Onnx name: Reshape

This version of the operator has been available since version 14.

Summary

Reshape the input tensor similar to numpy.reshape. First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. At most one dimension of the new shape can be -1. In this case, the value is inferred from the size of the tensor and the remaining dimensions. A dimension could also be 0, in which case the actual dimension value is unchanged (i.e. taken from the input tensor). If 'allowzero' is set, and the new shape includes 0, the dimension will be set explicitly to zero (i.e. not taken from the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. The input tensor's shape and the output tensor's shape are required to have the same number of elements.

If the attribute 'allowzero' is set, it is invalid for the specified shape to contain both a zero value and -1, as the value of the dimension corresponding to -1 cannot be determined uniquely.

Attributes

  • allowzero: (Optional) By default, when any value in the 'shape' input is equal to zero the corresponding dimension value is copied from the input tensor dynamically. allowzero=1 indicates that if any value in the 'shape' input is set to zero, the zero value is honored, similar to NumPy. Default value is name: "allowzero" i: 0 type: INT

Inputs

  • data (heterogeneous)T: An input tensor.

  • shape (heterogeneous)tensor(int64): Specified shape for output.

Outputs

  • reshaped (heterogeneous)T: Reshaped data.

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

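The shape rules themselves can be traced in plain Python; the helper below is only a sketch of the interpretation rules (it is not part of skl2onnx or onnx) and ignores error handling:

def resolve_shape(input_shape, shape, allowzero=0):
    # 0 copies the input dimension unless allowzero=1, in which case it is kept literally
    out = [input_shape[i] if s == 0 and not allowzero else s
           for i, s in enumerate(shape)]
    # a single -1 is inferred from the remaining element count
    if -1 in out:
        known = 1
        for s in out:
            if s != -1:
                known *= s
        total = 1
        for s in input_shape:
            total *= s
        out[out.index(-1)] = total // known
    return out

print(resolve_shape((2, 6), (0, -1, 2)))           # [2, 3, 2]
print(resolve_shape((0, 4), (0, 8), allowzero=1))  # [0, 8] -- the 0 is kept literally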
OnnxReshape_5#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxReshape_5(*args, **kwargs)#
+

Version

+

Onnx name: Reshape

+

This version of the operator has been available since +version 5.

+

Summary

+

Reshape the input tensor similar to numpy.reshape. +First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. +At most one dimension of the new shape can be -1. In this case, the value is +inferred from the size of the tensor and the remaining dimensions. A dimension +could also be 0, in which case the actual dimension value is unchanged (i.e. taken +from the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. +The input tensor’s shape and the output tensor’s shape are required to have the same number of elements.

+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
  • shape (heterogeneous)tensor(int64): Specified shape for output.

  • +
+

Outputs

+
    +
  • reshaped (heterogeneous)T: Reshaped data.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxResize#

class skl2onnx.algebra.onnx_ops.OnnxResize(*args, **kwargs)#

Version

Onnx name: Resize

This version of the operator has been available since version 19.

Summary

Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. Each dimension value of the output tensor is:

output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)

if input "sizes" is not specified.

Attributes

  • antialias: If set to 1, "linear" and "cubic" interpolation modes will use an antialiasing filter when downscaling. Antialiasing is achieved by stretching the resampling filter by a factor max(1, 1 / scale), which means that when downsampling, more input pixels contribute to an output pixel. Default value is name: "antialias" i: 0 type: INT

  • coordinate_transformation_mode: This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.

The coordinate of each dimension is transformed individually. Let's describe a case using axis x as an example. Denote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, length_original as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, scale = length_resized / length_original, output_width the target length on the axis x which can be a fractional number when it is calculated out of a scale factor, and output_width_int the effective output width as an integer.

if coordinate_transformation_mode is "half_pixel",
x_original = (x_resized + 0.5) / scale - 0.5

if coordinate_transformation_mode is "half_pixel_symmetric",
adjustment = output_width_int / output_width
center = input_width / 2
offset = center * (1 - adjustment)
x_ori = offset + (x + 0.5) / scale - 0.5

if coordinate_transformation_mode is "pytorch_half_pixel",
x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0

if coordinate_transformation_mode is "align_corners",
x_original = x_resized * (length_original - 1) / (length_resized - 1)

if coordinate_transformation_mode is "asymmetric",
x_original = x_resized / scale

if coordinate_transformation_mode is "tf_crop_and_resize",
x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)

Default value is name: "coordinate_transformation_mode" s: "half_pixel" type: STRING

  • cubic_coeff_a: The coefficient 'a' used in cubic interpolation. Two common choices are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if mode is "cubic". Default value is name: "cubic_coeff_a" f: -0.75 type: FLOAT

  • exclude_outside: If set to 1, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0. The default value is 0. Default value is name: "exclude_outside" i: 0 type: INT

  • extrapolation_value: When coordinate_transformation_mode is "tf_crop_and_resize" and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f. Default value is name: "extrapolation_value" f: 0.0 type: FLOAT

  • keep_aspect_ratio_policy: This attribute describes how to interpret the sizes input with regard to keeping the original aspect ratio of the input, and it is not applicable when the scales input is used.

Given a set of sizes, associated with a subset of axes (explicitly provided or default), and assuming d = axes[i], with i being the index of the provided sizes.

If keep_aspect_ratio_policy is "stretch", the original aspect ratio is disregarded, and the input is resized to the specified size:
out_size[d] = sizes[i]

If keep_aspect_ratio_policy is "not_larger", the sizes are adjusted so that no extent of the output is larger than the specified size, while keeping the original aspect ratio:
scale = Min(sizes[i] / in_size[d])
out_size[d] = round_int(scale * in_size[i])

If keep_aspect_ratio_policy is "not_smaller", the sizes are adjusted so that no extent of the output is smaller than the specified size, while keeping the original aspect ratio:
scale = Max(sizes[i] / in_size[d])
out_size[d] = round_int(scale * in_size[i])

For non-resizable axes (those not specified in axes), the output size will be equal to the input size.

Note: round_int stands for computing the nearest integer value, rounding halfway cases up. Default value is name: "keep_aspect_ratio_policy" s: "stretch" type: STRING

  • mode: Three interpolation modes: "nearest" (default), "linear" and "cubic". The "linear" mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The "cubic" mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor). Default value is name: "mode" s: "nearest" type: STRING

  • nearest_mode: Four modes: "round_prefer_floor" (default, also known as round half down), "round_prefer_ceil" (also known as round half up), "floor", "ceil". Only used by nearest interpolation. It indicates how to get the "nearest" pixel in the input tensor from x_original, so this attribute is valid only if "mode" is "nearest". Default value is name: "nearest_mode" s: "round_prefer_floor" type: STRING

Inputs

Between 1 and 4 inputs.

  • X (heterogeneous)T1: N-D tensor

  • roi (optional, heterogeneous)T2: 1-D tensor given as [start1, …, startN, end1, …, endN], where N is the rank of X or the length of axes, if provided. The RoIs' coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is "tf_crop_and_resize".

  • scales (optional, heterogeneous)tensor(float): The scale array along each dimension. It takes values greater than 0. If it's less than 1, it's sampling down, otherwise, it's upsampling. The number of elements of 'scales' should be the same as the rank of input 'X' or the length of 'axes', if provided. One of 'scales' and 'sizes' MUST be specified and it is an error if both are specified. If 'sizes' is needed, the user can use an empty string as the name of 'scales' in this operator's input list.

  • sizes (optional, heterogeneous)tensor(int64): Target size of the output tensor. Its interpretation depends on the 'keep_aspect_ratio_policy' value. The number of elements of 'sizes' should be the same as the rank of input 'X', or the length of 'axes', if provided. Only one of 'scales' and 'sizes' can be specified.

Outputs

  • Y (heterogeneous)T1: N-D tensor after resizing

Type Constraints

  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input 'X' and output 'Y' to all tensor types.

  • T2 tensor(float16), tensor(float), tensor(double): Constrain roi type to float or double.

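A hedged end-to-end sketch (assuming numpy, onnxruntime and an onnx build that provides opset 19; the empty 'roi' tensor is only a placeholder since the default coordinate_transformation_mode ignores it):

import numpy as np
import onnxruntime as rt
from skl2onnx.algebra.onnx_ops import OnnxResize

X = np.array([[[[1.0, 2.0], [3.0, 4.0]]]], dtype=np.float32)  # NCHW, 1x1x2x2
roi = np.array([], dtype=np.float32)                          # unused placeholder
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)     # double H and W

node = OnnxResize('X', roi, scales, mode='nearest', op_version=19, output_names=['Y'])
onx = node.to_onnx({'X': X}, target_opset=19)
sess = rt.InferenceSession(onx.SerializeToString(), providers=['CPUExecutionProvider'])
print(sess.run(None, {'X': X})[0].shape)  # (1, 1, 4, 4)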
OnnxResize_10#

class skl2onnx.algebra.onnx_ops.OnnxResize_10(*args, **kwargs)#

Version

Onnx name: Resize

This version of the operator has been available since version 10.

Summary

Resize the input tensor. Each dimension value of the output tensor is:

output_dimension = floor(input_dimension * scale).

Attributes

  • mode: Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc). Default value is name: "mode" s: "nearest" type: STRING

Inputs

  • X (heterogeneous)T: N-D tensor

  • scales (heterogeneous)tensor(float): The scale array along each dimension. It takes values greater than 0. If it's less than 1, it's sampling down, otherwise, it's upsampling. The number of elements of 'scales' should be the same as the rank of input 'X'.

Outputs

  • Y (heterogeneous)T: N-D tensor after resizing

Type Constraints

  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input 'X' and output 'Y' to all tensor types.

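The output-shape rule above reduces to a one-line computation; the following is only a worked arithmetic example:

import math

input_shape = (1, 3, 20, 30)
scales = (1.0, 1.0, 0.5, 2.0)
# output_dimension = floor(input_dimension * scale), per dimension
print(tuple(math.floor(d * s) for d, s in zip(input_shape, scales)))  # (1, 3, 10, 60)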
OnnxResize_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxResize_11(*args, **kwargs)#
+

Version

+

Onnx name: Resize

+

This version of the operator has been available since +version 11.

+

Summary

+

Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. +Each dimension value of the output tensor is:

+
+

output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input "sizes" is not specified.

+
+

Attributes

+
    +
  • coordinate_transformation_mode:

  • +
+

This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor. <br/>

+

The coordinate of each dimension is transformed individually. Let’s describe a case using axis x as an example. +Denote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, length_original as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in input “roi”, scale = length_resized / length_original, <br/>

+

if coordinate_transformation_mode is “half_pixel”, <br/> +x_original = (x_resized + 0.5) / scale - 0.5, <br/>

+

if coordinate_transformation_mode is “pytorch_half_pixel”, <br/> +x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0, <br/>

+

if coordinate_transformation_mode is “align_corners”, <br/> +x_original = x_resized * (length_original - 1) / (length_resized - 1), <br/>

+

if coordinate_transformation_mode is “asymmetric”, <br/> +x_original = x_resized / scale, <br/>

+

if coordinate_transformation_mode is “tf_half_pixel_for_nn”, <br/> +x_original = (x_resized + 0.5) / scale, <br/>

+

if coordinate_transformation_mode is “tf_crop_and_resize”, <br/> +x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1). Default value is

+
+

name: "coordinate_transformation_mode" s: "half_pixel" type: STRING

+
+
    +
  • cubic_coeff_a: The coefficient ‘a’ used in cubic interpolation. Two common choice are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if “mode” is “cubic”. Default value is +name: "cubic_coeff_a" f: -0.75 type: FLOAT

  • +
  • exclude_outside: If set to 1, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0. The default value is 0. Default value is +name: "exclude_outside" i: 0 type: INT

  • +
  • extrapolation_value: When coordinate_transformation_mode is “tf_crop_and_resize” and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f. Default value is +name: "extrapolation_value" f: 0.0 type: FLOAT

  • +
  • mode: Three interpolation modes: nearest (default), linear and cubic. The “linear” mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The “cubic” mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor). Default value is +name: "mode" s: "nearest" type: STRING

  • +
  • nearest_mode: Four modes: round_prefer_floor (default, as known as round half down), round_prefer_ceil (as known as round half up), floor, ceil. Only used by nearest interpolation. It indicates how to get “nearest” pixel in input tensor from x_original, so this attribute is valid only if “mode” is “nearest”. Default value is +name: "nearest_mode" s: "round_prefer_floor" type: STRING

  • +
+

Inputs

+

Between 3 and 4 inputs.

+
    +
  • X (heterogeneous)T1: N-D tensor

  • +
  • roi (heterogeneous)T2: 1-D tensor given as [start1, …, startN, end1, …, endN], where N is the rank of X. The RoIs’ coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is “tf_crop_and_resize”

  • +
  • scales (heterogeneous)tensor(float): The scale array along each dimension. It takes value greater than 0. If it’s less than 1, it’s sampling down, otherwise, it’s upsampling. The number of elements of ‘scales’ should be the same as the rank of input ‘X’. If ‘size’ is needed, the user must set ‘scales’ to an empty tensor.

  • +
  • sizes (optional, heterogeneous)tensor(int64): The size of the output tensor. The number of elements of ‘sizes’ should be the same as the rank of input ‘X’. May only be set if ‘scales’ is set to an empty tensor.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T1: N-D tensor after resizing

  • +
+

Type Constraints

+
    +
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

  • +
  • T2 tensor(float16), tensor(float), tensor(double): Constrain roi type to float or double.

  • +
+
+ +
+
+
+
+

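The coordinate_transformation_mode formulas listed above can be checked with a small stand-alone helper; this is an illustration only, not library code, and covers just a few of the modes:

def to_original(x_resized, scale, length_resized, length_original, mode):
    if mode == "half_pixel":
        return (x_resized + 0.5) / scale - 0.5
    if mode == "pytorch_half_pixel":
        return (x_resized + 0.5) / scale - 0.5 if length_resized > 1 else 0.0
    if mode == "align_corners":
        return x_resized * (length_original - 1) / (length_resized - 1)
    if mode == "asymmetric":
        return x_resized / scale
    raise ValueError("unhandled mode: %r" % mode)

# resizing a length-2 axis to length 4 (scale = 2.0)
for mode in ("half_pixel", "align_corners", "asymmetric"):
    print(mode, [round(to_original(x, 2.0, 4, 2, mode), 3) for x in range(4)])
# half_pixel     [-0.25, 0.25, 0.75, 1.25]
# align_corners  [0.0, 0.333, 0.667, 1.0]
# asymmetric     [0.0, 0.5, 1.0, 1.5]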
OnnxResize_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxResize_13(*args, **kwargs)#
+

Version

+

Onnx name: Resize

+

This version of the operator has been available since +version 13.

+

Summary

+

Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. +Each dimension value of the output tensor is:

+
+

output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input "sizes" is not specified.

+
+

Attributes

+
    +
  • coordinate_transformation_mode:

  • +
+

This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor. <br/>

+

The coordinate of each dimension is transformed individually. Let’s describe a case using axis x as an example. +Denote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, length_original as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in input “roi”, scale = length_resized / length_original, <br/>

+

if coordinate_transformation_mode is “half_pixel”, <br/> +x_original = (x_resized + 0.5) / scale - 0.5, <br/>

+

if coordinate_transformation_mode is “pytorch_half_pixel”, <br/> +x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0, <br/>

+

if coordinate_transformation_mode is “align_corners”, <br/> +x_original = x_resized * (length_original - 1) / (length_resized - 1), <br/>

+

if coordinate_transformation_mode is “asymmetric”, <br/> +x_original = x_resized / scale, <br/>

+

if coordinate_transformation_mode is “tf_crop_and_resize”, <br/> +x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1). Default value is

+
+

name: "coordinate_transformation_mode" s: "half_pixel" type: STRING

+
+
    +
  • cubic_coeff_a: The coefficient ‘a’ used in cubic interpolation. Two common choice are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if “mode” is “cubic”. Default value is +name: "cubic_coeff_a" f: -0.75 type: FLOAT

  • +
  • exclude_outside: If set to 1, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0. The default value is 0. Default value is +name: "exclude_outside" i: 0 type: INT

  • +
  • extrapolation_value: When coordinate_transformation_mode is “tf_crop_and_resize” and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f. Default value is +name: "extrapolation_value" f: 0.0 type: FLOAT

  • +
  • mode: Three interpolation modes: nearest (default), linear and cubic. The “linear” mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The “cubic” mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor). Default value is +name: "mode" s: "nearest" type: STRING

  • +
  • nearest_mode: Four modes: round_prefer_floor (default, as known as round half down), round_prefer_ceil (as known as round half up), floor, ceil. Only used by nearest interpolation. It indicates how to get “nearest” pixel in input tensor from x_original, so this attribute is valid only if “mode” is “nearest”. Default value is +name: "nearest_mode" s: "round_prefer_floor" type: STRING

  • +
+

Inputs

+

Between 1 and 4 inputs.

+
    +
  • X (heterogeneous)T1: N-D tensor

  • +
  • roi (optional, heterogeneous)T2: 1-D tensor given as [start1, …, startN, end1, …, endN], where N is the rank of X. The RoIs’ coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is “tf_crop_and_resize”

  • +
  • scales (optional, heterogeneous)tensor(float): The scale array along each dimension. It takes value greater than 0. If it’s less than 1, it’s sampling down, otherwise, it’s upsampling. The number of elements of ‘scales’ should be the same as the rank of input ‘X’. One of ‘scales’ and ‘sizes’ MUST be specified and it is an error if both are specified. If ‘sizes’ is needed, the user can use an empty string as the name of ‘scales’ in this operator’s input list.

  • +
  • sizes (optional, heterogeneous)tensor(int64): The size of the output tensor. The number of elements of ‘sizes’ should be the same as the rank of input ‘X’. Only one of ‘scales’ and ‘sizes’ can be specified.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T1: N-D tensor after resizing

  • +
+

Type Constraints

+
    +
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

  • +
  • T2 tensor(float16), tensor(float), tensor(double): Constrain roi type to float or double.

  • +
+
+ +
+
+
+
+

OnnxResize_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxResize_18(*args, **kwargs)#
+

Version

+

Onnx name: Resize

+

This version of the operator has been available since +version 18.

+

Summary

+

Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. +Each dimension value of the output tensor is: <br/>

+
+

output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) <br/>

+
+

if input "sizes" is not specified.

+

Attributes

+
    +
  • antialias: If set to 1, “linear” and “cubic” interpolation modes will use an antialiasing filter when downscaling. Antialiasing is achieved by stretching the resampling filter by a factor max(1, 1 / scale), which means that when downsampling, more input pixels contribute to an output pixel. Default value is +name: "antialias" i: 0 type: INT

  • +
  • +
  • coordinate_transformation_mode:

  • +
+

This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor. <br/>

+

The coordinate of each dimension is transformed individually. Let’s describe a case using axis x as an example. +Denote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, length_original as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in input “roi”, scale = length_resized / length_original, <br/>

+

if coordinate_transformation_mode is “half_pixel”, <br/> +x_original = (x_resized + 0.5) / scale - 0.5 <br/>

+

if coordinate_transformation_mode is “pytorch_half_pixel”, <br/> +x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0 <br/>

+

if coordinate_transformation_mode is “align_corners”, <br/> +x_original = x_resized * (length_original - 1) / (length_resized - 1) <br/>

+

if coordinate_transformation_mode is “asymmetric”, <br/> +x_original = x_resized / scale <br/>

+

if coordinate_transformation_mode is “tf_crop_and_resize”, <br/> +x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1) +. Default value is

+
+

name: "coordinate_transformation_mode" s: "half_pixel" type: STRING

+
+
    +
  • cubic_coeff_a: The coefficient ‘a’ used in cubic interpolation. Two common choice are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if mode is “cubic”. Default value is +name: "cubic_coeff_a" f: -0.75 type: FLOAT

  • +
  • exclude_outside: If set to 1, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0. The default value is 0. Default value is +name: "exclude_outside" i: 0 type: INT

  • +
  • extrapolation_value: When coordinate_transformation_mode is “tf_crop_and_resize” and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f. Default value is +name: "extrapolation_value" f: 0.0 type: FLOAT

  • +
  • keep_aspect_ratio_policy:

  • +
+

This attribute describes how to interpret the sizes input with regard to keeping the original aspect ratio of the input, and it is not applicable when +the scales input is used. <br/>

+

Given a set of sizes, associated with a subset of axes (explicitly provided or default), and assuming d = axes[i], with i being the index of the provided sizes. <br/>

+

If keep_aspect_ratio_policy is “stretch”, the original aspect ratio is disregarded, and the input is resized to the specified size: <br/> +out_size[d] = sizes[i] <br/>

+

If keep_aspect_ratio_policy is “not_larger”, the sizes are adjusted so that no extent of the output is larger than the specified size, while keeping the original aspect ratio: <br/> +scale = Min(sizes[i] / in_size[d]) <br/> +out_size[d] = round_int(scale * in_size[i]) <br/>

+

If keep_aspect_ratio_policy is “not_smaller”, the sizes are adjusted so that no extent of the output is smaller than the specified size, while keeping the original aspect ratio: <br/> +scale = Max(sizes[i] / in_size[d]) <br/> +out_size[d] = round_int(scale * in_size[i]) <br/>

+

For non-resizable axes (those not specified in axes), the output size will be equal to the input size.

+
+
Note: round_int stands for computing the nearest integer value, rounding halfway cases up. Default value is

name: "keep_aspect_ratio_policy" s: "stretch" type: STRING

+
+
+
    +
  • mode: Three interpolation modes: “nearest” (default), “linear” and “cubic”. The “linear” mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The “cubic” mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor). Default value is +name: "mode" s: "nearest" type: STRING

  • +
  • nearest_mode: Four modes: “round_prefer_floor” (default, as known as round half down), “round_prefer_ceil” (as known as round half up), “floor”, “ceil”. Only used by nearest interpolation. It indicates how to get “nearest” pixel in input tensor from x_original, so this attribute is valid only if “mode” is “nearest”. Default value is +name: "nearest_mode" s: "round_prefer_floor" type: STRING

  • +
+

Inputs

+

Between 1 and 4 inputs.

+
    +
  • X (heterogeneous)T1: N-D tensor

  • +
  • roi (optional, heterogeneous)T2: 1-D tensor given as [start1, …, startN, end1, …, endN], where N is the rank of X or the length of axes, if provided. The RoIs’ coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is “tf_crop_and_resize”

  • +
  • scales (optional, heterogeneous)tensor(float): The scale array along each dimension. It takes value greater than 0. If it’s less than 1, it’s sampling down, otherwise, it’s upsampling. The number of elements of ‘scales’ should be the same as the rank of input ‘X’ or the length of ‘axes’, if provided. One of ‘scales’ and ‘sizes’ MUST be specified and it is an error if both are specified. If ‘sizes’ is needed, the user can use an empty string as the name of ‘scales’ in this operator’s input list.

  • +
  • sizes (optional, heterogeneous)tensor(int64): Target size of the output tensor. Its interpretation depends on the ‘keep_aspect_ratio_policy’ value.The number of elements of ‘sizes’ should be the same as the rank of input ‘X’, or the length of ‘axes’, if provided. Only one of ‘scales’ and ‘sizes’ can be specified.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T1: N-D tensor after resizing

  • +
+

Type Constraints

+
    +
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

  • +
  • T2 tensor(float16), tensor(float), tensor(double): Constrain roi type to float or double.

OnnxResize_19#

class skl2onnx.algebra.onnx_ops.OnnxResize_19(*args, **kwargs)#

Version

Onnx name: Resize

This version of the operator has been available since version 19.

Summary

Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. Each dimension value of the output tensor is:

output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)

if input "sizes" is not specified.
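As a concrete illustration of this formula (a small sketch; the numbers are made up):

    import math

    input_dimension = 4
    roi_start, roi_end = 0.0, 1.0   # the default roi covers the whole axis
    scale = 2.5
    output_dimension = math.floor(input_dimension * (roi_end - roi_start) * scale)
    print(output_dimension)  # 10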

+

Attributes

+
  • antialias: If set to 1, “linear” and “cubic” interpolation modes will use an antialiasing filter when downscaling. Antialiasing is achieved by stretching the resampling filter by a factor max(1, 1 / scale), which means that when downsampling, more input pixels contribute to an output pixel. Default value is name: "antialias" i: 0 type: INT

  •

  • coordinate_transformation_mode: This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor. (A Python sketch of these transforms is given after this attribute list.)

    The coordinate of each dimension is transformed individually. Let’s describe a case using axis x as an example. Denote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, length_original as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, scale = length_resized / length_original, output_width the target length on the axis x which can be a fractional number when it is calculated out of a scale factor, and output_width_int the effective output width as an integer.

    if coordinate_transformation_mode is “half_pixel”,
    x_original = (x_resized + 0.5) / scale - 0.5

    if coordinate_transformation_mode is “half_pixel_symmetric”,
    adjustment = output_width_int / output_width
    center = input_width / 2
    offset = center * (1 - adjustment)
    x_ori = offset + (x + 0.5) / scale - 0.5

    if coordinate_transformation_mode is “pytorch_half_pixel”,
    x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0

    if coordinate_transformation_mode is “align_corners”,
    x_original = x_resized * (length_original - 1) / (length_resized - 1)

    if coordinate_transformation_mode is “asymmetric”,
    x_original = x_resized / scale

    if coordinate_transformation_mode is “tf_crop_and_resize”,
    x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)

    Default value is name: "coordinate_transformation_mode" s: "half_pixel" type: STRING

  • cubic_coeff_a: The coefficient ‘a’ used in cubic interpolation. Two common choices are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if mode is “cubic”. Default value is name: "cubic_coeff_a" f: -0.75 type: FLOAT

  • exclude_outside: If set to 1, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0. The default value is 0. Default value is name: "exclude_outside" i: 0 type: INT

  • extrapolation_value: When coordinate_transformation_mode is “tf_crop_and_resize” and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f. Default value is name: "extrapolation_value" f: 0.0 type: FLOAT

  • keep_aspect_ratio_policy: This attribute describes how to interpret the sizes input with regard to keeping the original aspect ratio of the input, and it is not applicable when the scales input is used.

    Given a set of sizes, associated with a subset of axes (explicitly provided or default), and assuming d = axes[i], with i being the index of the provided sizes.

    If keep_aspect_ratio_policy is “stretch”, the original aspect ratio is disregarded, and the input is resized to the specified size:
    out_size[d] = sizes[i]

    If keep_aspect_ratio_policy is “not_larger”, the sizes are adjusted so that no extent of the output is larger than the specified size, while keeping the original aspect ratio:
    scale = Min(sizes[i] / in_size[d])
    out_size[d] = round_int(scale * in_size[d])

    If keep_aspect_ratio_policy is “not_smaller”, the sizes are adjusted so that no extent of the output is smaller than the specified size, while keeping the original aspect ratio:
    scale = Max(sizes[i] / in_size[d])
    out_size[d] = round_int(scale * in_size[d])

    For non-resizable axes (those not specified in axes), the output size will be equal to the input size.

    Note: round_int stands for computing the nearest integer value, rounding halfway cases up. Default value is name: "keep_aspect_ratio_policy" s: "stretch" type: STRING

  • mode: Three interpolation modes: “nearest” (default), “linear” and “cubic”. The “linear” mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The “cubic” mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor). Default value is name: "mode" s: "nearest" type: STRING

  • nearest_mode: Four modes: “round_prefer_floor” (default, also known as round half down), “round_prefer_ceil” (also known as round half up), “floor”, “ceil”. Only used by nearest interpolation. It indicates how to get the “nearest” pixel in the input tensor from x_original, so this attribute is valid only if “mode” is “nearest”. Default value is name: "nearest_mode" s: "round_prefer_floor" type: STRING
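These transforms are plain scalar formulas; the following Python sketch (illustrative only, not the onnxruntime kernel) evaluates a few of them for a single axis:

    def x_original(x_resized, length_resized, length_original, mode="half_pixel"):
        """Map an output coordinate back to the input coordinate along one axis."""
        scale = length_resized / length_original
        if mode == "half_pixel":
            return (x_resized + 0.5) / scale - 0.5
        if mode == "pytorch_half_pixel":
            return (x_resized + 0.5) / scale - 0.5 if length_resized > 1 else 0.0
        if mode == "align_corners":
            return x_resized * (length_original - 1) / (length_resized - 1)
        if mode == "asymmetric":
            return x_resized / scale
        raise ValueError(f"unhandled mode: {mode}")

    # Upscaling a length-4 axis to length 8: the first output pixel maps back to
    # -0.25 with "half_pixel" and to 0.0 with "asymmetric".
    print(x_original(0, 8, 4, "half_pixel"))   # -0.25
    print(x_original(0, 8, 4, "asymmetric"))   # 0.0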

Inputs

+

Between 1 and 4 inputs.

+
    +
  • X (heterogeneous)T1: N-D tensor

  • +
  • roi (optional, heterogeneous)T2: 1-D tensor given as [start1, …, startN, end1, …, endN], where N is the rank of X or the length of axes, if provided. The RoIs’ coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is “tf_crop_and_resize”

  • +
  • scales (optional, heterogeneous)tensor(float): The scale array along each dimension. It takes value greater than 0. If it’s less than 1, it’s sampling down, otherwise, it’s upsampling. The number of elements of ‘scales’ should be the same as the rank of input ‘X’ or the length of ‘axes’, if provided. One of ‘scales’ and ‘sizes’ MUST be specified and it is an error if both are specified. If ‘sizes’ is needed, the user can use an empty string as the name of ‘scales’ in this operator’s input list.

  • +
  • sizes (optional, heterogeneous)tensor(int64): Target size of the output tensor. Its interpretation depends on the ‘keep_aspect_ratio_policy’ value.The number of elements of ‘sizes’ should be the same as the rank of input ‘X’, or the length of ‘axes’, if provided. Only one of ‘scales’ and ‘sizes’ can be specified.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T1: N-D tensor after resizing

  • +
+

Type Constraints

+
    +
  • T1 tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

  • +
  • T2 tensor(float16), tensor(float), tensor(double): Constrain roi type to float or double.

OnnxReverseSequence#

class skl2onnx.algebra.onnx_ops.OnnxReverseSequence(*args, **kwargs)#

Version

+

Onnx name: ReverseSequence

+

This version of the operator has been available since version 10.

+

Summary

+

Reverse batch of sequences having different lengths specified by sequence_lens.

+

For each slice i iterating on batch axis, the operator reverses the first sequence_lens[i] elements on time axis, and copies elements whose index is beyond sequence_lens[i] to the output. So the output slice i contains reversed sequences on the first sequence_lens[i] elements, and has the original values copied for the other elements.

+
+
Example 1:

  input = [[0.0, 4.0, 8.0, 12.0],
           [1.0, 5.0, 9.0, 13.0],
           [2.0, 6.0, 10.0, 14.0],
           [3.0, 7.0, 11.0, 15.0]]
  sequence_lens = [4, 3, 2, 1]
  time_axis = 0
  batch_axis = 1

  output = [[3.0, 6.0, 9.0, 12.0],
            [2.0, 5.0, 8.0, 13.0],
            [1.0, 4.0, 10.0, 14.0],
            [0.0, 7.0, 11.0, 15.0]]

Example 2:

  input = [[0.0, 1.0, 2.0, 3.0],
           [4.0, 5.0, 6.0, 7.0],
           [8.0, 9.0, 10.0, 11.0],
           [12.0, 13.0, 14.0, 15.0]]
  sequence_lens = [1, 2, 3, 4]
  time_axis = 1
  batch_axis = 0

  output = [[0.0, 1.0, 2.0, 3.0],
            [5.0, 4.0, 6.0, 7.0],
            [10.0, 9.0, 8.0, 11.0],
            [15.0, 14.0, 13.0, 12.0]]
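Both examples can be reproduced with a small NumPy sketch of the operator’s semantics (illustrative only, not the runtime implementation):

    import numpy as np

    def reverse_sequence(x, sequence_lens, batch_axis=1, time_axis=0):
        """Reverse the first sequence_lens[i] elements of each batch slice along time_axis."""
        out = np.array(x, copy=True)
        for i, n in enumerate(sequence_lens):
            batch_slice = [slice(None)] * x.ndim
            batch_slice[batch_axis] = i
            dst = list(batch_slice)
            dst[time_axis] = slice(0, n)
            src = list(batch_slice)
            src[time_axis] = slice(n - 1, None, -1) if n > 0 else slice(0, 0)
            out[tuple(dst)] = x[tuple(src)]
        return out

    x = np.arange(16, dtype=np.float32).reshape(4, 4)
    print(reverse_sequence(x, [1, 2, 3, 4], batch_axis=0, time_axis=1))
    # matches Example 2 above: [[0, 1, 2, 3], [5, 4, 6, 7], [10, 9, 8, 11], [15, 14, 13, 12]]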

Attributes

+
    +
  • batch_axis: (Optional) Specify which axis is batch axis. Must be one of 1 (default), or 0. Default value is name: "batch_axis" i: 1 type: INT

  • time_axis: (Optional) Specify which axis is time axis. Must be one of 0 (default), or 1. Default value is name: "time_axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Tensor of rank r >= 2.

  • +
  • sequence_lens (heterogeneous)tensor(int64): Tensor specifying lengths of the sequences in a batch. It has shape [batch_size].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Tensor with same shape of input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

OnnxReverseSequence_10#

class skl2onnx.algebra.onnx_ops.OnnxReverseSequence_10(*args, **kwargs)#

Version

+

Onnx name: ReverseSequence

+

This version of the operator has been available since +version 10.

+

Summary

+

Reverse batch of sequences having different lengths specified by sequence_lens.

+

For each slice i iterating on batch axis, the operator reverses the first sequence_lens[i] elements on time axis, +and copies elements whose index’s beyond sequence_lens[i] to the output. So the output slice i contains reversed +sequences on the first sequence_lens[i] elements, then have original values copied for the other elements.

+
+
Example 1:
+
input = [[0.0, 4.0, 8.0, 12.0],

[1.0, 5.0, 9.0, 13.0], +[2.0, 6.0, 10.0, 14.0], +[3.0, 7.0, 11.0, 15.0]]

+
+
+

sequence_lens = [4, 3, 2, 1] +time_axis = 0 +batch_axis = 1

+
+
output = [[3.0, 6.0, 9.0, 12.0],

[2.0, 5.0, 8.0, 13.0], +[1.0, 4.0, 10.0, 14.0], +[0.0, 7.0, 11.0, 15.0]]

+
+
+
+
Example 2:
+
input = [[0.0, 1.0, 2.0, 3.0 ],

[4.0, 5.0, 6.0, 7.0 ], +[8.0, 9.0, 10.0, 11.0], +[12.0, 13.0, 14.0, 15.0]]

+
+
+

sequence_lens = [1, 2, 3, 4] +time_axis = 1 +batch_axis = 0

+
+
output = [[0.0, 1.0, 2.0, 3.0 ],

[5.0, 4.0, 6.0, 7.0 ], +[10.0, 9.0, 8.0, 11.0], +[15.0, 14.0, 13.0, 12.0]]

+
+
+
+
+

Attributes

+
    +
  • batch_axis: (Optional) Specify which axis is batch axis. Must be one of 1 (default), or 0. Default value is +name: "batch_axis" i: 1 type: INT

  • +
  • time_axis: (Optional) Specify which axis is time axis. Must be one of 0 (default), or 1. Default value is +name: "time_axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Tensor of rank r >= 2.

  • +
  • sequence_lens (heterogeneous)tensor(int64): Tensor specifying lengths of the sequences in a batch. It has shape [batch_size].

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Tensor with same shape of input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

OnnxRoiAlign#

class skl2onnx.algebra.onnx_ops.OnnxRoiAlign(*args, **kwargs)#

Version

+

Onnx name: RoiAlign

+

This version of the operator has been available since version 16.

+

Summary

+

Region of Interest (RoI) align operation described in the [Mask R-CNN paper](https://arxiv.org/abs/1703.06870). RoiAlign consumes an input tensor X and region of interests (rois) to apply pooling across each RoI; it produces a 4-D tensor of shape (num_rois, C, output_height, output_width).

RoiAlign is proposed to avoid the misalignment by removing quantizations while converting from the original image into the feature map and from the feature map into the RoI feature; in each RoI bin, the values of the sampled locations are computed directly through bilinear interpolation.

+
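As a quick orientation on the tensor shapes involved, here is a small sketch (the sizes and RoI coordinates are made up for illustration; this is not code from skl2onnx):

    import numpy as np

    N, C, H, W = 2, 3, 32, 32          # batch of 2 feature maps
    output_height, output_width = 7, 7

    X = np.random.rand(N, C, H, W).astype(np.float32)       # input feature map
    rois = np.array([[0, 0, 16, 16],
                     [8, 8, 24, 24],
                     [0, 0, 31, 31],
                     [4, 4, 20, 28]], dtype=np.float32)      # [x1, y1, x2, y2] per RoI
    batch_indices = np.array([0, 0, 1, 1], dtype=np.int64)   # image each RoI belongs to

    # RoiAlign pools each RoI into a fixed grid, so the output shape is
    # (num_rois, C, output_height, output_width) = (4, 3, 7, 7).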

Attributes

+
    +
  • coordinate_transformation_mode: Allowed values are ‘half_pixel’ and ‘output_half_pixel’. Use the value ‘half_pixel’ to pixel shift the input coordinates by -0.5 (the recommended behavior). Use the value ‘output_half_pixel’ to omit the pixel shift for the input (use this for a backward-compatible behavior). Default value is name: "coordinate_transformation_mode" s: "half_pixel" type: STRING

  • mode: The pooling method. Two modes are supported: ‘avg’ and ‘max’. Default is ‘avg’. Default value is name: "mode" s: "avg" type: STRING

  • output_height: default 1; Pooled output Y’s height. Default value is name: "output_height" i: 1 type: INT

  • output_width: default 1; Pooled output Y’s width. Default value is name: "output_width" i: 1 type: INT

  • sampling_ratio: Number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If == 0, then an adaptive number of grid points are used (computed as ceil(roi_width / output_width), and likewise for height). Default is 0. Default value is name: "sampling_ratio" i: 0 type: INT

  • spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling, i.e., spatial scale of the input feature map X relative to the input image. E.g.; default is 1.0f. Default value is name: "spatial_scale" f: 1.0 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: Input data tensor from the previous operator; 4-D feature map of shape (N, C, H, W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.

  • +
  • rois (heterogeneous)T1: RoIs (Regions of Interest) to pool over; rois is 2-D input of shape (num_rois, 4) given as [[x1, y1, x2, y2], …]. The RoIs’ coordinates are in the coordinate system of the input image. Each coordinate set has a 1:1 correspondence with the ‘batch_indices’ input.

  • +
  • batch_indices (heterogeneous)T2: 1-D tensor of shape (num_rois,) with each element denoting the index of the corresponding image in the batch.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T1: RoI pooled output, 4-D tensor of shape (num_rois, C, output_height, output_width). The r-th batch element Y[r-1] is a pooled feature map corresponding to the r-th RoI X[r-1].

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain types to float tensors.

  • +
  • T2 tensor(int64): Constrain types to int tensors.

OnnxRoiAlign_10#

class skl2onnx.algebra.onnx_ops.OnnxRoiAlign_10(*args, **kwargs)#

Version

+

Onnx name: RoiAlign

+

This version of the operator has been available since +version 10.

+

Summary

+

Region of Interest (RoI) align operation described in the +[Mask R-CNN paper](https://arxiv.org/abs/1703.06870). +RoiAlign consumes an input tensor X and region of interests (rois) +to apply pooling across each RoI; it produces a 4-D tensor of shape +(num_rois, C, output_height, output_width).

+

RoiAlign is proposed to avoid the misalignment by removing +quantizations while converting from original image into feature +map and from feature map into RoI feature; in each ROI bin, +the value of the sampled locations are computed directly +through bilinear interpolation.

+

Attributes

+
    +
  • mode: The pooling method. Two modes are supported: ‘avg’ and ‘max’. Default is ‘avg’. Default value is +name: "mode" s: "avg" type: STRING

  • +
  • output_height: default 1; Pooled output Y’s height. Default value is +name: "output_height" i: 1 type: INT

  • +
  • output_width: default 1; Pooled output Y’s width. Default value is +name: "output_width" i: 1 type: INT

  • +
  • sampling_ratio: Number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If == 0, then an adaptive number of grid points are used (computed as ceil(roi_width / output_width), and likewise for height). Default is 0. Default value is +name: "sampling_ratio" i: 0 type: INT

  • +
  • spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling, i.e., spatial scale of the input feature map X relative to the input image. E.g.; default is 1.0f. Default value is +name: "spatial_scale" f: 1.0 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: Input data tensor from the previous operator; 4-D feature map of shape (N, C, H, W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.

  • +
  • rois (heterogeneous)T1: RoIs (Regions of Interest) to pool over; rois is 2-D input of shape (num_rois, 4) given as [[x1, y1, x2, y2], …]. The RoIs’ coordinates are in the coordinate system of the input image. Each coordinate set has a 1:1 correspondence with the ‘batch_indices’ input.

  • +
  • batch_indices (heterogeneous)T2: 1-D tensor of shape (num_rois,) with each element denoting the index of the corresponding image in the batch.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T1: RoI pooled output, 4-D tensor of shape (num_rois, C, output_height, output_width). The r-th batch element Y[r-1] is a pooled feature map corresponding to the r-th RoI X[r-1].

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain types to float tensors.

  • +
  • T2 tensor(int64): Constrain types to int tensors.

OnnxRoiAlign_16#

class skl2onnx.algebra.onnx_ops.OnnxRoiAlign_16(*args, **kwargs)#

Version

+

Onnx name: RoiAlign

+

This version of the operator has been available since +version 16.

+

Summary

+

Region of Interest (RoI) align operation described in the +[Mask R-CNN paper](https://arxiv.org/abs/1703.06870). +RoiAlign consumes an input tensor X and region of interests (rois) +to apply pooling across each RoI; it produces a 4-D tensor of shape +(num_rois, C, output_height, output_width).

+

RoiAlign is proposed to avoid the misalignment by removing +quantizations while converting from original image into feature +map and from feature map into RoI feature; in each ROI bin, +the value of the sampled locations are computed directly +through bilinear interpolation.

+

Attributes

+
    +
  • coordinate_transformation_mode: Allowed values are ‘half_pixel’ and ‘output_half_pixel’. Use the value ‘half_pixel’ to pixel shift the input coordinates by -0.5 (the recommended behavior). Use the value ‘output_half_pixel’ to omit the pixel shift for the input (use this for a backward-compatible behavior). Default value is +name: "coordinate_transformation_mode" s: "half_pixel" type: STRING

  • +
  • mode: The pooling method. Two modes are supported: ‘avg’ and ‘max’. Default is ‘avg’. Default value is +name: "mode" s: "avg" type: STRING

  • +
  • output_height: default 1; Pooled output Y’s height. Default value is +name: "output_height" i: 1 type: INT

  • +
  • output_width: default 1; Pooled output Y’s width. Default value is +name: "output_width" i: 1 type: INT

  • +
  • sampling_ratio: Number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If == 0, then an adaptive number of grid points are used (computed as ceil(roi_width / output_width), and likewise for height). Default is 0. Default value is +name: "sampling_ratio" i: 0 type: INT

  • +
  • spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling, i.e., spatial scale of the input feature map X relative to the input image. E.g.; default is 1.0f. Default value is +name: "spatial_scale" f: 1.0 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: Input data tensor from the previous operator; 4-D feature map of shape (N, C, H, W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.

  • +
  • rois (heterogeneous)T1: RoIs (Regions of Interest) to pool over; rois is 2-D input of shape (num_rois, 4) given as [[x1, y1, x2, y2], …]. The RoIs’ coordinates are in the coordinate system of the input image. Each coordinate set has a 1:1 correspondence with the ‘batch_indices’ input.

  • +
  • batch_indices (heterogeneous)T2: 1-D tensor of shape (num_rois,) with each element denoting the index of the corresponding image in the batch.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T1: RoI pooled output, 4-D tensor of shape (num_rois, C, output_height, output_width). The r-th batch element Y[r-1] is a pooled feature map corresponding to the r-th RoI X[r-1].

  • +
+

Type Constraints

+
    +
  • T1 tensor(float16), tensor(float), tensor(double): Constrain types to float tensors.

  • +
  • T2 tensor(int64): Constrain types to int tensors.

OnnxRound#

class skl2onnx.algebra.onnx_ops.OnnxRound(*args, **kwargs)#

Version

+

Onnx name: Round

+

This version of the operator has been available since version 11.

+

Summary

+

Round takes one input Tensor and rounds the values, element-wise, meaning it finds the nearest integer for each value. In case of halves, the rule is to round them to the nearest even integer. The output tensor has the same shape and type as the input.

+

Examples:

+
round([0.9]) = [1.0]
round([2.5]) = [2.0]
round([2.3]) = [2.0]
round([1.5]) = [2.0]
round([-4.5]) = [-4.0]
+
+
+
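NumPy’s np.round follows the same round-half-to-even rule, so it can be used to check these examples (a sketch, not the onnxruntime kernel):

    import numpy as np

    x = np.array([0.9, 2.5, 2.3, 1.5, -4.5], dtype=np.float32)
    print(np.round(x))  # [ 1.  2.  2.  2. -4.]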

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxRound_11#

class skl2onnx.algebra.onnx_ops.OnnxRound_11(*args, **kwargs)#

Version

+

Onnx name: Round

+

This version of the operator has been available since +version 11.

+

Summary

+

Round takes one input Tensor and rounds the values, element-wise, meaning +it finds the nearest integer for each value. +In case of halfs, the rule is to round them to the nearest even integer. +The output tensor has the same shape and type as the input.

+

Examples:

+
round([0.9]) = [1.0]
+round([2.5]) = [2.0]
+round([2.3]) = [2.0]
+round([1.5]) = [2.0]
+round([-4.5]) = [-4.0]
+
+
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

OnnxSTFT#

class skl2onnx.algebra.onnx_ops.OnnxSTFT(*args, **kwargs)#

Version

+

Onnx name: STFT

+

This version of the operator has been available since version 17.

+

Summary

+

Computes the Short-time Fourier Transform of the signal.

+

Attributes

+
    +
  • onesided: If onesided is 1, only values for w in [0, 1, 2, …, floor(n_fft/2) + 1] are returned because the real-to-complex Fourier transform satisfies the conjugate symmetry, i.e., X[m, w] = X[m, n_fft-w]*. Note if the input or window tensors are complex, then onesided output is not possible. Enabling onesided with real inputs performs a Real-valued fast Fourier transform (RFFT). When invoked with real or complex valued input, the default value is 1. Values can be 0 or 1. Default value is name: "onesided" i: 1 type: INT

  • +
+

Inputs

+

Between 2 and 4 inputs.

+
    +
  • signal (heterogeneous)T1: Input tensor representing a real or complex valued signal. For real input, the following shape is expected: [batch_size][signal_length][1]. For complex input, the following shape is expected: [batch_size][signal_length][2], where [batch_size][signal_length][0] represents the real component and [batch_size][signal_length][1] represents the imaginary component of the signal.

  • +
  • frame_step (heterogeneous)T2: The number of samples to step between successive DFTs.

  • +
  • window (optional, heterogeneous)T1: A tensor representing the window that will be slid over the signal. The window must have rank 1 with shape: [window_shape]. It’s an optional value.

  • +
  • frame_length (optional, heterogeneous)T2: A scalar representing the size of the DFT. It’s an optional value.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T1: The Short-time Fourier Transform of the signals. If onesided is 1, the output has the shape: [batch_size][frames][dft_unique_bins][2], where dft_unique_bins is frame_length // 2 + 1 (the unique components of the DFT). If onesided is 0, the output has the shape: [batch_size][frames][frame_length][2], where frame_length is the length of the DFT.

  • +
+
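The output shape can be worked out from the inputs. The sketch below assumes the usual framing convention frames = 1 + (signal_length - frame_length) // frame_step; the concrete numbers are made up for illustration:

    batch_size = 1
    signal_length = 16000      # e.g. one second of 16 kHz audio
    frame_length = 512         # DFT size
    frame_step = 128           # hop between successive DFTs

    frames = 1 + (signal_length - frame_length) // frame_step
    dft_unique_bins = frame_length // 2 + 1   # onesided=1 keeps only the unique bins

    print(frames, dft_unique_bins)            # 122 257
    # onesided=1 -> output shape [batch_size][frames][dft_unique_bins][2] = [1][122][257][2]
    # onesided=0 -> output shape [batch_size][frames][frame_length][2]    = [1][122][512][2]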

Type Constraints

+
    +
  • T1 tensor(float), tensor(float16), tensor(double), tensor(bfloat16): Constrain signal and output to float tensors.

  • +
  • T2 tensor(int32), tensor(int64): Constrain scalar length types to int64_t.

OnnxSTFT_17#

class skl2onnx.algebra.onnx_ops.OnnxSTFT_17(*args, **kwargs)#

Version

+

Onnx name: STFT

+

This version of the operator has been available since +version 17.

+

Summary

+

Computes the Short-time Fourier Transform of the signal.

+

Attributes

+
    +
  • onesided: If onesided is 1, only values for w in [0, 1, 2, …, floor(n_fft/2) + 1] are returned because the real-to-complex Fourier transform satisfies the conjugate symmetry, i.e., X[m, w] = X[m,w]=X[m,n_fft-w]*. Note if the input or window tensors are complex, then onesided output is not possible. Enabling onesided with real inputs performs a Real-valued fast Fourier transform (RFFT).When invoked with real or complex valued input, the default value is 1. Values can be 0 or 1. Default value is +name: "onesided" i: 1 type: INT

  • +
+

Inputs

+

Between 2 and 4 inputs.

+
    +
  • signal (heterogeneous)T1: Input tensor representing a real or complex valued signal. For real input, the following shape is expected: [batch_size][signal_length][1]. For complex input, the following shape is expected: [batch_size][signal_length][2], where [batch_size][signal_length][0] represents the real component and [batch_size][signal_length][1] represents the imaginary component of the signal.

  • +
  • frame_step (heterogeneous)T2: The number of samples to step between successive DFTs.

  • +
  • window (optional, heterogeneous)T1: A tensor representing the window that will be slid over the signal.The window must have rank 1 with shape: [window_shape]. It’s an optional value.

  • +
  • frame_length (optional, heterogeneous)T2: A scalar representing the size of the DFT. It’s an optional value.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T1: The Short-time Fourier Transform of the signals.If onesided is 1, the output has the shape: [batch_size][frames][dft_unique_bins][2], where dft_unique_bins is frame_length // 2 + 1 (the unique components of the DFT) If onesided is 0, the output has the shape: [batch_size][frames][frame_length][2], where frame_length is the length of the DFT.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(float16), tensor(double), tensor(bfloat16): Constrain signal and output to float tensors.

  • +
  • T2 tensor(int32), tensor(int64): Constrain scalar length types to int64_t.

OnnxSVMClassifier#

class skl2onnx.algebra.onnx_ops.OnnxSVMClassifier(*args, **kwargs)#

Version

+

Onnx name: SVMClassifier

+

This version of the operator has been available since version 1 of domain ai.onnx.ml.

+

Summary

+

Support Vector Machine classifier

+
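In practice this node is usually produced by converting a fitted scikit-learn SVC with skl2onnx rather than being built by hand. A minimal sketch, assuming skl2onnx, scikit-learn and onnxruntime are installed (dataset and options are illustrative):

    import numpy as np
    import onnxruntime as rt
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    from skl2onnx import to_onnx

    X, y = load_iris(return_X_y=True)
    X = X.astype(np.float32)
    clf = SVC(kernel="rbf", probability=True).fit(X, y)

    # The fitted SVC becomes an ai.onnx.ml SVMClassifier node in the exported graph.
    onx = to_onnx(clf, X[:1])

    sess = rt.InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
    labels = sess.run(None, {sess.get_inputs()[0].name: X[:5]})[0]
    print(labels)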

Attributes

+
    +
  • +
  • +
  • +
  • +
  • kernel_type: The kernel type, one of ‘LINEAR,’ ‘POLY,’ ‘RBF,’ ‘SIGMOID’. Default value is name: "kernel_type" s: "LINEAR" type: STRING

  • post_transform: Indicates the transform to apply to the score. One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’. Default value is name: "post_transform" s: "NONE" type: STRING

  • +
  • +
  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: Data to be classified.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: Classification outputs (one class per example).

  • +
  • Z (heterogeneous)tensor(float): Class scores (one per class per example), if prob_a and prob_b are provided they are probabilities for each class, otherwise they are raw scores.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type, either [C] or [N,C].

  • +
  • T2 tensor(string), tensor(int64): The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used. Its size will match the batch size of the input.

OnnxSVMClassifier_1#

class skl2onnx.algebra.onnx_ops.OnnxSVMClassifier_1(*args, **kwargs)#

Version

+

Onnx name: SVMClassifier

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Support Vector Machine classifier

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • kernel_type: The kernel type, one of ‘LINEAR,’ ‘POLY,’ ‘RBF,’ ‘SIGMOID’. Default value is +name: "kernel_type" s: "LINEAR" type: STRING

  • +
  • post_transform: Indicates the transform to apply to the score. <br>One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’ Default value is +name: "post_transform" s: "NONE" type: STRING

  • +
  • +
  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: Data to be classified.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: Classification outputs (one class per example).

  • +
  • Z (heterogeneous)tensor(float): Class scores (one per class per example), if prob_a and prob_b are provided they are probabilities for each class, otherwise they are raw scores.

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type, either [C] or [N,C].

  • +
  • T2 tensor(string), tensor(int64): The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used. Its size will match the batch size of the input.

OnnxSVMRegressor#

class skl2onnx.algebra.onnx_ops.OnnxSVMRegressor(*args, **kwargs)#

Version

+

Onnx name: SVMRegressor

+

This version of the operator has been available since version 1 of domain ai.onnx.ml.

+

Summary

+

Support Vector Machine regression prediction and one-class SVM anomaly detection.

+
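Likewise, a fitted scikit-learn SVR (or OneClassSVM) is converted by skl2onnx into this operator. A minimal sketch with illustrative data and settings:

    import numpy as np
    import onnxruntime as rt
    from sklearn.datasets import load_diabetes
    from sklearn.svm import SVR
    from skl2onnx import to_onnx

    X, y = load_diabetes(return_X_y=True)
    X = X.astype(np.float32)
    reg = SVR(kernel="rbf").fit(X, y)

    # The fitted SVR becomes an ai.onnx.ml SVMRegressor node in the exported graph.
    onx = to_onnx(reg, X[:1])

    sess = rt.InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
    pred = sess.run(None, {sess.get_inputs()[0].name: X[:5]})[0]
    print(pred.ravel())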

Attributes

+
    +
  • +
  • +
  • kernel_type: The kernel type, one of ‘LINEAR,’ ‘POLY,’ ‘RBF,’ ‘SIGMOID’. Default value is name: "kernel_type" s: "LINEAR" type: STRING

  • n_supports: The number of support vectors. Default value is name: "n_supports" i: 0 type: INT

  • one_class: Flag indicating whether the regression is a one-class SVM or not. Default value is name: "one_class" i: 0 type: INT

  • post_transform: Indicates the transform to apply to the score. One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT.’ Default value is name: "post_transform" s: "NONE" type: STRING

  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Data to be regressed.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(float): Regression outputs (one score per target per example).

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type, either [C] or [N,C].

OnnxSVMRegressor_1#

class skl2onnx.algebra.onnx_ops.OnnxSVMRegressor_1(*args, **kwargs)#

Version

+

Onnx name: SVMRegressor

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Support Vector Machine regression prediction and one-class SVM anomaly detection.

+

Attributes

+
    +
  • +
  • +
  • kernel_type: The kernel type, one of ‘LINEAR,’ ‘POLY,’ ‘RBF,’ ‘SIGMOID’. Default value is +name: "kernel_type" s: "LINEAR" type: STRING

  • +
  • n_supports: The number of support vectors. Default value is +name: "n_supports" i: 0 type: INT

  • +
  • one_class: Flag indicating whether the regression is a one-class SVM or not. Default value is +name: "one_class" i: 0 type: INT

  • +
  • post_transform: Indicates the transform to apply to the score. <br>One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT.’ Default value is +name: "post_transform" s: "NONE" type: STRING

  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Data to be regressed.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(float): Regression outputs (one score per target per example).

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type, either [C] or [N,C].

OnnxScaler#

class skl2onnx.algebra.onnx_ops.OnnxScaler(*args, **kwargs)#

Version

+

Onnx name: Scaler

+

This version of the operator has been available since version 1 of domain ai.onnx.ml.

+

Summary

+

Rescale input data, for example to standardize features by removing the mean and scaling to unit variance.

+
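This is the node a scikit-learn StandardScaler typically maps to when converted with skl2onnx. A minimal conversion sketch with illustrative data:

    import numpy as np
    import onnxruntime as rt
    from sklearn.preprocessing import StandardScaler
    from skl2onnx import to_onnx

    X = np.random.rand(20, 3).astype(np.float32)
    scaler = StandardScaler().fit(X)

    # StandardScaler is exported as an ai.onnx.ml Scaler node: Y = (X - offset) * scale.
    onx = to_onnx(scaler, X[:1])

    sess = rt.InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
    got = sess.run(None, {sess.get_inputs()[0].name: X})[0]
    print(np.allclose(got, scaler.transform(X), atol=1e-5))  # expected: True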

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Data to be scaled.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(float): Scaled output data.

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type.

OnnxScaler_1#

class skl2onnx.algebra.onnx_ops.OnnxScaler_1(*args, **kwargs)#

Version

+

Onnx name: Scaler

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Rescale input data, for example to standardize features by removing the mean and scaling to unit variance.

+

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Data to be scaled.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(float): Scaled output data.

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input must be a tensor of a numeric type.

OnnxScan#

class skl2onnx.algebra.onnx_ops.OnnxScan(*args, **kwargs)#

Version

+

Onnx name: Scan

+

This version of the operator has been available since version 16.

+

Summary

+

Scan can be used to iterate over one or more scan_input tensors, +constructing zero or more scan_output tensors. It combines ideas from general recurrences, +functional programming constructs such as scan, fold, map, and zip, and is intended to enable +generalizations of RNN-like constructs for sequence-to-sequence processing. +Other tensors (referred to as state_variables here) can be used to carry a state +when iterating from one element to another (similar to hidden-state in RNNs, also referred +to as loop-carried dependences in the context of loops). +Many common usages involve a single scan_input tensor (where functionality +similar to scan, fold and map can be obtained). When more than one scan_input is used, +a behavior similar to zip is obtained.

+

The attribute body must be a graph, specifying the computation to be performed in +every iteration. It takes as input the current values of the state_variables and +the current iterated element of the scan_inputs. It must return the (updated) values +of the state_variables and zero or more scan_output_element tensors. The values of the +scan_output_element tensors are concatenated over all the iterations to produce the +scan_output values of the scan construct (similar to the concatenated intermediate +hidden-state values of RNN-like constructs). All the output tensors (state_variables as +well as scan_output_element tensors) are required to have the same shape in each iteration +of the loop (a restriction imposed to enable efficient memory allocation).

+

Note that the iterated element passed to the body subgraph does not have a sequence +axis. It will have a rank one less than the rank of the corresponding scan_input.

+

The scan operation returns the final values of the state_variables as well as the +scan_outputs.

+

The optional attribute scan_input_directions specifies the direction (forward or backward) +for each scan input. If this attribute is omitted, all sequences are scanned in the forward +direction. A bidirectional scan may be performed by specifying the same tensor input twice +in the scan_inputs, once with a forward direction, and once with a backward direction.

+

The scan_output of the operation is produced by concatenating the scan_output_element +values produced by the body in each iteration. The optional attribute scan_output_directions +specifies the direction in which scan_output is constructed (by appending or prepending the +scan_output_element to scan_output in each iteration) for each scan_output. If this attribute +is omitted, the scan_output_element is appended to the scan_output in each iteration.

+

The optional attribute scan_input_axes specifies the axis to be scanned for each scan_input. +If omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the +batch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1. +Note that scanning a non-zero axis may be less efficient than scanning axis zero.

+

The optional attribute scan_output_axes specifies the axis along which the scan_outputs +are accumulated for each scan_output. For example, if axis 1 is the time axis (to be +scanned) for both inputs and outputs, specify a scan_input axis and scan_output axis +value of 1.

+

Note that because of the ONNX restriction that only the last parameter of an operator can +be variadic, the initial-states and scan-inputs are listed together as one input parameter. +Similarly, the final-states and scan-outputs are listed together as one output parameter. +The attribute num_scan_inputs indicates the number M of scan-inputs.

+

The behavior of

    Scan <
        num_scan_inputs = m,
        body = loop-body,
        scan_input_axes = [axis_1, …, axis_m]
    > (init_1, …, init_n, scan_1, …, scan_m)

is equivalent to the following pseudo-code:

    // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i
    // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.
    sequence_length = scan_1.shape[axis_1];

    // initialize state-variables
    st_1 = init_1; … st_n = init_n;
    // initialize scan-output variables: [] denotes an empty tensor
    scan_out_1 = []; …; scan_out_k = [];
    // identify number of iterations:

    // execute loop
    for (int t = 0; t < sequence_length; ++t) {
        // generate the scan-input elements: the notation T<axis=k>[t] indicates the sub-tensor
        // of rank one less than T obtained by indexing T at position t along axis k.
        si_1 = scan_1<axis=axis_1>[t];
        … ;
        si_m = scan_m<axis=axis_m>[t];
        // execute loop-body
        st_1, …, st_n, so_1, …, so_k = loop-body(st_1, …, st_n, si_1, …, si_m)
        // accumulate the scan-output elements
        scan_out_1 = Concat<axis=0>(scan_out_1, so_1); … ; scan_out_k = Concat<axis=0>(scan_out_k, so_k);
    }

    return st_1, …, st_n, scan_out_1, …, scan_out_k;

Sample usage: Encoding RNN using a Scan

The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes %Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these values are computed in the outer graph, they need to be passed in as extra state_variables.

    graph rnn-encoding {
        %H_0 = …
        %X = …
        %Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1](%H_0, %X)
        return %Y, %Y_h
    }

    graph rnn-cell-1 (
        %H_tminus1[FLOAT, tensor]
        %X_t[FLOAT, tensor]
    ) {
        %Wi = …
        %Ri = …
        %Wbi = …
        %Rbi = …
        %t1 = X_t * (Wi^T)
        %t2 = H_tminus1*(Ri^T)
        %t3 = Add(%t1, %t2)
        %t4 = Add(%t3, %Wbi)
        %t5 = Add(%t4, %Rbi)
        %Ht = Tanh(%t5)
        %Accumulate = Identity(%Ht)
        return %Ht, %Accumulate
    }
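The pseudo-code above can be mimicked in plain Python for the common single-state, single-scan-input case. The sketch below is illustrative only (it is not the ONNX runtime); the body accumulates a running sum:

    import numpy as np

    def scan(body, init_state, scan_input):
        """One state variable, one scan input, scanning along axis 0."""
        state = init_state
        scan_outputs = []
        for t in range(scan_input.shape[0]):
            state, out_elem = body(state, scan_input[t])
            scan_outputs.append(out_elem)
        return state, np.stack(scan_outputs, axis=0)

    # body: new_state = state + x_t, scan-output element = new_state (a cumulative sum)
    body = lambda state, x_t: (state + x_t, state + x_t)

    x = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32)
    final_state, scan_out = scan(body, np.zeros(2, dtype=np.float32), x)
    print(final_state)  # [ 9. 12.]
    print(scan_out)     # [[1. 2.] [4. 6.] [9. 12.]]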

Attributes

+
    +
  • +
  • +
  • +
  • +
  • +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • initial_state_and_scan_inputs (variadic)V: Initial values of the loop’s N state variables followed by M scan_inputs

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • final_state_and_scan_outputs (variadic)V: Final values of the loop’s N state variables followed by K scan_outputs

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

OnnxScan_11#

class skl2onnx.algebra.onnx_ops.OnnxScan_11(*args, **kwargs)#

Version

+

Onnx name: Scan

+

This version of the operator has been available since +version 11.

+

Summary

+

Scan can be used to iterate over one or more scan_input tensors, +constructing zero or more scan_output tensors. It combines ideas from general recurrences, +functional programming constructs such as scan, fold, map, and zip, and is intended to enable +generalizations of RNN-like constructs for sequence-to-sequence processing. +Other tensors (referred to as state_variables here) can be used to carry a state +when iterating from one element to another (similar to hidden-state in RNNs, also referred +to as loop-carried dependences in the context of loops). +Many common usages involve a single scan_input tensor (where functionality +similar to scan, fold and map can be obtained). When more than one scan_input is used, +a behavior similar to zip is obtained.

+

The attribute body must be a graph, specifying the computation to be performed in +every iteration. It takes as input the current values of the state_variables and +the current iterated element of the scan_inputs. It must return the (updated) values +of the state_variables and zero or more scan_output_element tensors. The values of the +scan_output_element tensors are concatenated over all the iterations to produce the +scan_output values of the scan construct (similar to the concatenated intermediate +hidden-state values of RNN-like constructs). All the output tensors (state_variables as +well as scan_output_element tensors) are required to have the same shape in each iteration +of the loop (a restriction imposed to enable efficient memory allocation).

+

Note that the iterated element passed to the body subgraph does not have a sequence +axis. It will have a rank one less than the rank of the corresponding scan_input.

+

The scan operation returns the final values of the state_variables as well as the +scan_outputs.

+

The optional attribute scan_input_directions specifies the direction (forward or backward) +for each scan input. If this attribute is omitted, all sequences are scanned in the forward +direction. A bidirectional scan may be performed by specifying the same tensor input twice +in the scan_inputs, once with a forward direction, and once with a backward direction.

+

The scan_output of the operation is produced by concatenating the scan_output_element +values produced by the body in each iteration. The optional attribute scan_output_directions +specifies the direction in which scan_output is constructed (by appending or prepending the +scan_output_element to scan_output in each iteration) for each scan_output. If this attribute +is omitted, the scan_output_element is appended to the scan_output in each iteration.

+

The optional attribute scan_input_axes specifies the axis to be scanned for each scan_input. +If omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the +batch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1. +Note that scanning a non-zero axis may be less efficient than scanning axis zero.

+

The optional attribute scan_output_axes specifies the axis along which the scan_outputs +are accumulated for each scan_output. For example, if axis 1 is the time axis (to be +scanned) for both inputs and outputs, specify a scan_input axis and scan_output axis +value of 1.

+

Note that because of the ONNX restriction that only the last parameter of an operator can +be variadic, the initial-states and scan-inputs are listed together as one input parameter. +Similarly, the final-states and scan-outputs are listed together as one output parameter. +The attribute num_scan_inputs indicates the number M of scan-inputs.

+

The behavior of

+
+
+
Scan <

num_scan_inputs = m, +body = loop-body, +scan_input_axes = [axis_1, …, axis_m]

+
+
+

> (init_1, …, init_n, scan_1, …, scan_m)

+
+

is equivalent to the following pseudo-code:

+
+

// scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i +// scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j. +sequence_length = scan_1.shape[axis_1];

+

// initialize state-variables +st_1 = init_1; … st_n = init_n; +// initialize scan-output variables: [] denotes an empty tensor +scan_out_1 = []; …; scan_out_k = []; +// identify number of iterations:

+

// execute loop +for (int t = 0; t < sequence_length; ++t) {

+
+

// generate the scan-input elements: the notation T<axis=k>[t] indicates the sub-tensor +// of rank one less than T obtained by indexing T at position t along axis k. +si_1 = scan_1<axis=axis_1>[t]; +… ; +si_m = scan_m<axis=axis_m>[t]; +// execute loop-body +st_1, …, st_n, so_1, …, so_k = loop-body(st_1, …, st_n, si_1, …, si_m) +// accumulate the scan-output elements +scan_out_1 = Concat<axis=0>(scan_out_1, so_1); … ; scan_out_k = Concat<axis=0>(scan_out_k, so_k);

+
+

}

+

return st_1, …, st_n, scan_out_1, …, scan_out_k;

+
+

Sample usage: Encoding RNN using a Scan

+

The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, +recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can +be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes +%Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these +values are computed in the outer graph, they need to be passed in as extra state_variables.

+
+
+
graph rnn-encoding {

%H_0 = … +%X = … +%Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1](%H_0, %X) +return %Y, %Y_h

+
+
+

}

+
+
graph rnn-cell-1 (

%H_tminus1[FLOAT, tensor] +%X_t[FLOAT, tensor]

+
+
) {

%Wi = … +%Ri = … +%Wbi = … +%Rbi = … +%t1 = X_t * (Wi^T) +%t2 = H_tminus1*(Ri^T) +%t3 = Add(%t1, %t2) +%t4 = Add(%t3, %Wbi) +%t5 = Add(%t4, %Rbi) +%Ht = Tanh(%t5) +%Accumulate = Identity(%Ht) +return %Ht, %Accumulate

+
+
+

}

+
+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • initial_state_and_scan_inputs (variadic)V: Initial values of the loop’s N state variables followed by M scan_inputs

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • final_state_and_scan_outputs (variadic)V: Final values of the loop’s N state variables followed by K scan_outputs

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

OnnxScan_16#

class skl2onnx.algebra.onnx_ops.OnnxScan_16(*args, **kwargs)#

Version

+

Onnx name: Scan

+

This version of the operator has been available since +version 16.

+

Summary

+

Scan can be used to iterate over one or more scan_input tensors, +constructing zero or more scan_output tensors. It combines ideas from general recurrences, +functional programming constructs such as scan, fold, map, and zip, and is intended to enable +generalizations of RNN-like constructs for sequence-to-sequence processing. +Other tensors (referred to as state_variables here) can be used to carry a state +when iterating from one element to another (similar to hidden-state in RNNs, also referred +to as loop-carried dependences in the context of loops). +Many common usages involve a single scan_input tensor (where functionality +similar to scan, fold and map can be obtained). When more than one scan_input is used, +a behavior similar to zip is obtained.

+

The attribute body must be a graph, specifying the computation to be performed in +every iteration. It takes as input the current values of the state_variables and +the current iterated element of the scan_inputs. It must return the (updated) values +of the state_variables and zero or more scan_output_element tensors. The values of the +scan_output_element tensors are concatenated over all the iterations to produce the +scan_output values of the scan construct (similar to the concatenated intermediate +hidden-state values of RNN-like constructs). All the output tensors (state_variables as +well as scan_output_element tensors) are required to have the same shape in each iteration +of the loop (a restriction imposed to enable efficient memory allocation).

+

Note that the iterated element passed to the body subgraph does not have a sequence +axis. It will have a rank one less than the rank of the corresponding scan_input.

+

The scan operation returns the final values of the state_variables as well as the +scan_outputs.

+

The optional attribute scan_input_directions specifies the direction (forward or backward) +for each scan input. If this attribute is omitted, all sequences are scanned in the forward +direction. A bidirectional scan may be performed by specifying the same tensor input twice +in the scan_inputs, once with a forward direction, and once with a backward direction.

+

The scan_output of the operation is produced by concatenating the scan_output_element +values produced by the body in each iteration. The optional attribute scan_output_directions +specifies the direction in which scan_output is constructed (by appending or prepending the +scan_output_element to scan_output in each iteration) for each scan_output. If this attribute +is omitted, the scan_output_element is appended to the scan_output in each iteration.

+

The optional attribute scan_input_axes specifies the axis to be scanned for each scan_input. +If omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the +batch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1. +Note that scanning a non-zero axis may be less efficient than scanning axis zero.

+

The optional attribute scan_output_axes specifies the axis along which the scan_outputs +are accumulated for each scan_output. For example, if axis 1 is the time axis (to be +scanned) for both inputs and outputs, specify a scan_input axis and scan_output axis +value of 1.

+

Note that because of the ONNX restriction that only the last parameter of an operator can +be variadic, the initial-states and scan-inputs are listed together as one input parameter. +Similarly, the final-states and scan-outputs are listed together as one output parameter. +The attribute num_scan_inputs indicates the number M of scan-inputs.

+

The behavior of

+
+
+
Scan <

num_scan_inputs = m, +body = loop-body, +scan_input_axes = [axis_1, …, axis_m]

+
+
+

> (init_1, …, init_n, scan_1, …, scan_m)

+
+

is equivalent to the following pseudo-code:

+
+

// scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i +// scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j. +sequence_length = scan_1.shape[axis_1];

+

// initialize state-variables +st_1 = init_1; … st_n = init_n; +// initialize scan-output variables: [] denotes an empty tensor +scan_out_1 = []; …; scan_out_k = []; +// identify number of iterations:

+

// execute loop +for (int t = 0; t < sequence_length; ++t) {

+
+

// generate the scan-input elements: the notation T<axis=k>[t] indicates the sub-tensor +// of rank one less than T obtained by indexing T at position t along axis k. +si_1 = scan_1<axis=axis_1>[t]; +… ; +si_m = scan_m<axis=axis_m>[t]; +// execute loop-body +st_1, …, st_n, so_1, …, so_k = loop-body(st_1, …, st_n, si_1, …, si_m) +// accumulate the scan-output elements +scan_out_1 = Concat<axis=0>(scan_out_1, so_1); … ; scan_out_k = Concat<axis=0>(scan_out_k, so_k);

+
+

}

+

return st_1, …, st_n, scan_out_1, …, scan_out_k;

+
+

Sample usage: Encoding RNN using a Scan

+

The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, +recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can +be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes +%Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these +values are computed in the outer graph, they need to be passed in as extra state_variables.

+
+
+
graph rnn-encoding {

%H_0 = … +%X = … +%Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1](%H_0, %X) +return %Y, %Y_h

+
+
+

}

+
+
graph rnn-cell-1 (

%H_tminus1[FLOAT, tensor] +%X_t[FLOAT, tensor]

+
+
) {

%Wi = … +%Ri = … +%Wbi = … +%Rbi = … +%t1 = X_t * (Wi^T) +%t2 = H_tminus1*(Ri^T) +%t3 = Add(%t1, %t2) +%t4 = Add(%t3, %Wbi) +%t5 = Add(%t4, %Rbi) +%Ht = Tanh(%t5) +%Accumulate = Identity(%Ht) +return %Ht, %Accumulate

+
+
+

}

+
+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • initial_state_and_scan_inputs (variadic)V: Initial values of the loop’s N state variables followed by M scan_inputs

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • final_state_and_scan_outputs (variadic)V: Final values of the loop’s N state variables followed by K scan_outputs

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

OnnxScan_8#

class skl2onnx.algebra.onnx_ops.OnnxScan_8(*args, **kwargs)#

Version

+

Onnx name: Scan

+

This version of the operator has been available since +version 8.

+

Summary

+

Scan can be used to iterate over one or more scan_input tensors, constructing zero or more scan_output tensors. It combines ideas from general recurrences, functional programming constructs such as scan, fold, map, and zip, and is intended to enable generalizations of RNN-like constructs for sequence-to-sequence processing. Other tensors (referred to as state_variables here) can be used to carry a state when iterating from one element to another (similar to hidden-state in RNNs, also referred to as loop-carried dependences in the context of loops). All these tensors are required to have the same shape in each iteration of the loop (a restriction imposed to enable efficient memory allocation). Many common usages involve a single scan_input tensor (where functionality similar to scan, fold and map can be obtained). When more than one scan_input is used, a behavior similar to zip is obtained.

The attribute body must be a graph, specifying the computation to be performed in every iteration. It takes as input the current values of the state_variables and the current iterated element of the scan_inputs. It must return the (updated) values of the state_variables and zero or more scan_output_element tensors. The values of the scan_output_element tensors are concatenated over all the iterations to produce the scan_output values of the scan construct (similar to the concatenated intermediate hidden-state values of RNN-like constructs).

The scan operation returns the final values of the state_variables as well as the scan_outputs.

The operation supports batching, and the batch-axis is required to be 0. When multiple scan_input tensors are used, they must all have the same batch-size, and they must all have the same maximum-sequence-length (the dimensionality of the sequence axis or scan axis). The sequence axis or scan axis is required to be 1.

The operation has an optional sequence_lens input (of shape [BATCH_SIZE]) to allow variable length sequences of length <= the maximum-sequence-length. If this input is not specified, all sequences are assumed to be of length equal to maximum-sequence-length. For variable length input sequences, the scan_outputs will consist of a sequence of same length as the input, padded to the maximum-sequence-length.

The optional attribute directions can be used to scan a sequence in the reverse direction. If this attribute is omitted, all sequences are scanned in the forward direction. A bidirectional scan can be performed by specifying the same tensor input twice in the scan_inputs, once with a forward direction, and once with a backward direction.

Note that because of the ONNX restriction that only the last parameter of an operator can be variadic, the initial-states and scan-inputs are listed together as one input parameter. Similarly, the final-states and scan-outputs are listed together as one output parameter. The attribute num_scan_inputs indicates the number M of scan-inputs.

The behavior of

    Scan <
        num_scan_inputs = m,
        body = loop-body
    > (sequence_lengths, init_1, …, init_n, scan_1, …, scan_m)

is equivalent to the following pseudo-code:

// T.shape[0] denotes the batch-size of T
// The batch-size of scan_1, …, scan_m are all required to be equal
batch_size = scan_1.shape[0];

// scan_i.shape[1] denotes the (max) sequence-length of scan_i
// scan_i.shape[1] is required to be equal to scan_j.shape[1] for all i,j.
max_sequence_length = scan_1.shape[1];

for (int batch = 0; batch < batch_size; ++batch) {
    // initialize state-variables
    st_1 = init_1; … st_n = init_n;
    // initialize scan-output variables: [] denotes an empty tensor
    scan_out_1 = []; …; scan_out_k = [];
    // identify number of iterations:
    N = (sequence_lengths specified) ? sequence_lengths[batch] : max_sequence_length;

    // execute loop
    for (int t = 0; t < N; ++t) {
        // generate the scan-input elements: the notation T<axis=k>[t] indicates the sub-tensor
        // of rank one less than T obtained by indexing T at position t along axis k.
        si_1 = (scan_1<axis=0>[batch])<axis=1>[t];
        … ;
        si_m = (scan_m<axis=0>[batch])<axis=1>[t];
        // execute loop-body
        st_1, …, st_n, so_1, …, so_k = loop-body(st_1, …, st_n, si_1, …, si_m)
        // accumulate the scan-output elements
        scan_out_1 = Concat<axis=0>(scan_out_1, so_1); … ; scan_out_k = Concat<axis=0>(scan_out_k, so_k);
    }
    // accumulate the outputs for this batch:
    bst_1[batch] = st_1; …, bst_n[batch] = st_n;
    // Note scan-outputs will have size max_sequence_length, but only first N values will be meaningful.
    // The remaining values have an undefined value.
    b_scan_out_1[batch] = scan_out_1; …; b_scan_out_k[batch] = scan_out_k;
}
return bst_1, …, bst_n, b_scan_out_1, …, b_scan_out_k;
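The following NumPy sketch (an illustrative assumption, not the runtime implementation) mirrors the batched pseudo-code above for a single state variable and a single scan input, with a running sum as the loop-body and optional per-batch sequence lengths:

import numpy as np

def scan8_running_sum(init, scan_input, sequence_lens=None):
    # batch axis is 0, scan axis is 1, as required by Scan-8
    batch_size, max_len = scan_input.shape[0], scan_input.shape[1]
    final_states = np.empty_like(init)
    scan_out = np.zeros_like(scan_input)      # padded to max_len as in the spec
    for b in range(batch_size):
        n = max_len if sequence_lens is None else int(sequence_lens[b])
        state = init[b]
        for t in range(n):
            state = state + scan_input[b, t]  # loop-body
            scan_out[b, t] = state            # accumulate scan-output element
        final_states[b] = state
    return final_states, scan_out

x = np.arange(12.0).reshape(2, 3, 2)
h0 = np.zeros((2, 2))
final_states, scan_out = scan8_running_sum(h0, x, sequence_lens=np.array([3, 2]))
# final_states -> [[ 6.  9.] [14. 16.]]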

Sample usage: Encoding RNN using a Scan

The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes %Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these values are computed in the outer graph, they need to be passed in as extra state_variables.

graph rnn-encoding {
    %H_0 = …
    %X = …
    %Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1](“”, %H_0, %X)
    return %Y, %Y_h
}

graph rnn-cell-1 (
    %H_tminus1[FLOAT, tensor]
    %X_t[FLOAT, tensor]
) {
    %Wi = …
    %Ri = …
    %Wbi = …
    %Rbi = …
    %t1 = X_t * (Wi^T)
    %t2 = H_tminus1*(Ri^T)
    %t3 = Add(%t1, %t2)
    %t4 = Add(%t3, %Wbi)
    %t5 = Add(%t4, %Rbi)
    %Ht = Tanh(%t5)
    %Accumulate = Identity(%Ht)
    return %Ht, %Accumulate
}

Attributes

+
    +
  • +
  • +
  • +
+

Inputs

+

Between 2 and 2147483647 inputs.

+
    +
  • sequence_lens (optional, heterogeneous)I: Optional tensor specifying lengths of the sequences in a batch. If this input is not specified, all sequences are assumed to be of the maximum sequence length (the dimension of the sequence axis of the scan_input tensors).

  • +
  • initial_state_and_scan_inputs (variadic)V: Initial values of the loop’s N state variables followed by M scan_inputs

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • final_state_and_scan_outputs (variadic)V: Final values of the loop’s N state variables followed by K scan_outputs

  • +
+

Type Constraints

+
    +
  • I tensor(int64): Int64 tensor

  • +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

  • +
+
+ +
+
+
+
+

OnnxScan_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScan_9(*args, **kwargs)#
+

Version

+

Onnx name: Scan

+

This version of the operator has been available since +version 9.

+

Summary

+

Scan can be used to iterate over one or more scan_input tensors, constructing zero or more scan_output tensors. It combines ideas from general recurrences, functional programming constructs such as scan, fold, map, and zip, and is intended to enable generalizations of RNN-like constructs for sequence-to-sequence processing. Other tensors (referred to as state_variables here) can be used to carry a state when iterating from one element to another (similar to hidden-state in RNNs, also referred to as loop-carried dependences in the context of loops). Many common usages involve a single scan_input tensor (where functionality similar to scan, fold and map can be obtained). When more than one scan_input is used, a behavior similar to zip is obtained.

The attribute body must be a graph, specifying the computation to be performed in every iteration. It takes as input the current values of the state_variables and the current iterated element of the scan_inputs. It must return the (updated) values of the state_variables and zero or more scan_output_element tensors. The values of the scan_output_element tensors are concatenated over all the iterations to produce the scan_output values of the scan construct (similar to the concatenated intermediate hidden-state values of RNN-like constructs). All the output tensors (state_variables as well as scan_output_element tensors) are required to have the same shape in each iteration of the loop (a restriction imposed to enable efficient memory allocation).

Note that the iterated element passed to the body subgraph does not have a sequence axis. It will have a rank one less than the rank of the corresponding scan_input.

The scan operation returns the final values of the state_variables as well as the scan_outputs.

The optional attribute scan_input_directions specifies the direction (forward or backward) for each scan input. If this attribute is omitted, all sequences are scanned in the forward direction. A bidirectional scan may be performed by specifying the same tensor input twice in the scan_inputs, once with a forward direction, and once with a backward direction.

The scan_output of the operation is produced by concatenating the scan_output_element values produced by the body in each iteration. The optional attribute scan_output_directions specifies the direction in which scan_output is constructed (by appending or prepending the scan_output_element to scan_output in each iteration) for each scan_output. If this attribute is omitted, the scan_output_element is appended to the scan_output in each iteration.

The optional attribute scan_input_axes specifies the axis to be scanned for each scan_input. If omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the batch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1. Note that scanning a non-zero axis may be less efficient than scanning axis zero.

The optional attribute scan_output_axes specifies the axis along which the scan_outputs are accumulated for each scan_output. For example, if axis 1 is the time axis (to be scanned) for both inputs and outputs, specify a scan_input axis and scan_output axis value of 1.

Note that because of the ONNX restriction that only the last parameter of an operator can be variadic, the initial-states and scan-inputs are listed together as one input parameter. Similarly, the final-states and scan-outputs are listed together as one output parameter. The attribute num_scan_inputs indicates the number M of scan-inputs.

+

The behavior of

    Scan <
        num_scan_inputs = m,
        body = loop-body,
        scan_input_axes = [axis_1, …, axis_m]
    > (init_1, …, init_n, scan_1, …, scan_m)

is equivalent to the following pseudo-code:

// scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i
// scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.
sequence_length = scan_1.shape[axis_1];

// initialize state-variables
st_1 = init_1; … st_n = init_n;
// initialize scan-output variables: [] denotes an empty tensor
scan_out_1 = []; …; scan_out_k = [];
// identify number of iterations:

// execute loop
for (int t = 0; t < sequence_length; ++t) {
    // generate the scan-input elements: the notation T<axis=k>[t] indicates the sub-tensor
    // of rank one less than T obtained by indexing T at position t along axis k.
    si_1 = scan_1<axis=axis_1>[t];
    … ;
    si_m = scan_m<axis=axis_m>[t];
    // execute loop-body
    st_1, …, st_n, so_1, …, so_k = loop-body(st_1, …, st_n, si_1, …, si_m)
    // accumulate the scan-output elements
    scan_out_1 = Concat<axis=0>(scan_out_1, so_1); … ; scan_out_k = Concat<axis=0>(scan_out_k, so_k);
}

return st_1, …, st_n, scan_out_1, …, scan_out_k;

+
+

Sample usage: Encoding RNN using a Scan

The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes %Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these values are computed in the outer graph, they need to be passed in as extra state_variables.

graph rnn-encoding {
    %H_0 = …
    %X = …
    %Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1](%H_0, %X)
    return %Y, %Y_h
}

graph rnn-cell-1 (
    %H_tminus1[FLOAT, tensor]
    %X_t[FLOAT, tensor]
) {
    %Wi = …
    %Ri = …
    %Wbi = …
    %Rbi = …
    %t1 = X_t * (Wi^T)
    %t2 = H_tminus1*(Ri^T)
    %t3 = Add(%t1, %t2)
    %t4 = Add(%t3, %Wbi)
    %t5 = Add(%t4, %Rbi)
    %Ht = Tanh(%t5)
    %Accumulate = Identity(%Ht)
    return %Ht, %Accumulate
}
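To make the loop-body concrete, here is a hedged NumPy sketch of rnn-cell-1 driven by a scan over the time axis of %X (the shapes and random weights are illustrative assumptions, not values taken from this documentation):

import numpy as np

def rnn_scan(X, H0, Wi, Ri, Wbi, Rbi):
    # scan %X along axis 0; %H is the single state variable
    H = H0
    outputs = []
    for t in range(X.shape[0]):
        x_t = X[t]
        H = np.tanh(x_t @ Wi.T + H @ Ri.T + Wbi + Rbi)   # rnn-cell-1 body
        outputs.append(H)                                # %Accumulate = Identity(%Ht)
    return H, np.stack(outputs, axis=0)                  # %Y_h, %Y

rng = np.random.default_rng(0)
X = rng.standard_normal((5, 4))                          # 5 time steps, input size 4
H0 = np.zeros(3)                                         # hidden size 3
Wi, Ri = rng.standard_normal((3, 4)), rng.standard_normal((3, 3))
Wbi, Rbi = np.zeros(3), np.zeros(3)
Y_h, Y = rnn_scan(X, H0, Wi, Ri, Wbi, Rbi)
# Y.shape -> (5, 3), Y_h.shape -> (3,)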

Attributes

+
    +
  • +
  • +
  • +
  • +
  • +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • initial_state_and_scan_inputs (variadic)V: Initial values of the loop’s N state variables followed by M scan_inputs

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • final_state_and_scan_outputs (variadic)V: Final values of the loop’s N state variables followed by K scan_outputs

  • +
+

Type Constraints

+
    +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): All Tensor types

  • +
+
+ +
+
+
+
+

OnnxScatter#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatter(*args, **kwargs)#
+

Version

+

Onnx name: Scatter

+

This version of the operator has been deprecated since +version 11.

+

Summary

+

This operator is deprecated. Please use ScatterElements, which provides the same functionality.

+

Scatter takes three inputs data, updates, and indices of the same rank r >= 1 and an optional attribute axis that identifies an axis of data (by default, the outer-most axis, that is axis 0). The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data.

For each entry in updates, the target index in data is obtained by combining the corresponding entry in indices with the index of the entry itself: the index-value for dimension = axis is obtained from the value of the corresponding entry in indices and the index-value for dimension != axis is obtained from the index of the entry itself.

For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry is performed as below:

output[indices[i][j]][j] = updates[i][j] if axis = 0,
output[i][indices[i][j]] = updates[i][j] if axis = 1,

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.

+

Example 1:

+
data = [
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+]
+indices = [
+    [1, 0, 2],
+    [0, 2, 1],
+]
+updates = [
+    [1.0, 1.1, 1.2],
+    [2.0, 2.1, 2.2],
+]
+output = [
+    [2.0, 1.1, 0.0]
+    [1.0, 0.0, 2.2]
+    [0.0, 2.1, 1.2]
+]
+
+
+

Example 2:

+
data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
+indices = [[1, 3]]
+updates = [[1.1, 2.1]]
+axis = 1
+output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
+
+
+
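For non-negative indices, the behaviour illustrated in Example 2 can be reproduced with NumPy’s put_along_axis (shown here only as an illustrative assumption of equivalence, not as the runtime implementation):

import numpy as np

data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]])
indices = np.array([[1, 3]])
updates = np.array([[1.1, 2.1]])

output = data.copy()
np.put_along_axis(output, indices, updates, axis=1)   # scatter along axis 1
# output -> [[1.  1.1 3.  2.1 5. ]]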

Attributes

+
    +
  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
  • updates (heterogeneous)T: Tensor of rank r >=1 (same rank and shape as indices)

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1 (same rank as input).

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxScatterElements#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatterElements(*args, **kwargs)#
+

Version

+

Onnx name: ScatterElements

+

This version of the operator has been available since +version 18.

+

Summary

+

ScatterElements takes three inputs data, updates, and indices of the same rank r >= 1 and an optional attribute axis that identifies an axis of data (by default, the outer-most axis, that is axis 0). The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data.

For each entry in updates, the target index in data is obtained by combining the corresponding entry in indices with the index of the entry itself: the index-value for dimension = axis is obtained from the value of the corresponding entry in indices and the index-value for dimension != axis is obtained from the index of the entry itself.

reduction allows specification of an optional reduction operation, which is applied to all values in updates tensor into output at the specified indices. In cases where reduction is set to “none”, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry is performed as below:

output[indices[i][j]][j] = updates[i][j] if axis = 0,
output[i][indices[i][j]] = updates[i][j] if axis = 1,

When reduction is set to some reduction function f, the update corresponding to the [i][j] entry is performed as below:

output[indices[i][j]][j] = f(output[indices[i][j]][j], updates[i][j]) if axis = 0,
output[i][indices[i][j]] = f(output[i][indices[i][j]], updates[i][j]) if axis = 1,

where the f is +, *, max or min as specified.

+

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.

+

(Opset 18 change): Adds max/min to the set of allowed reduction ops.

+

Example 1:

+
data = [
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+]
+indices = [
+    [1, 0, 2],
+    [0, 2, 1],
+]
+updates = [
+    [1.0, 1.1, 1.2],
+    [2.0, 2.1, 2.2],
+]
+output = [
+    [2.0, 1.1, 0.0]
+    [1.0, 0.0, 2.2]
+    [0.0, 2.1, 1.2]
+]
+
+
+

Example 2:

+
data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
+indices = [[1, 3]]
+updates = [[1.1, 2.1]]
+axis = 1
+output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
+
+
+
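The following NumPy sketch (an illustrative assumption) applies the reduction="add" rule quoted above on a 2-D input with axis=0; since no two updates target the same position, starting from all-zero data it coincides with the output of Example 1:

import numpy as np

def scatter_elements_add_axis0(data, indices, updates):
    output = np.copy(data)
    rows, cols = indices.shape
    for i in range(rows):
        for j in range(cols):
            output[indices[i, j], j] += updates[i, j]   # f = addition
    return output

data = np.zeros((3, 3))
indices = np.array([[1, 0, 2], [0, 2, 1]])
updates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]])
print(scatter_elements_add_axis0(data, indices, updates))
# [[2.  1.1 0. ]
#  [1.  0.  2.2]
#  [0.  2.1 1.2]]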

Attributes

+
    +
  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
  • reduction: Type of reduction to apply: none (default), add, mul, max, min. ‘none’: no reduction applied. ‘add’: reduction using the addition operation. ‘mul’: reduction using the multiplication operation.’max’: reduction using the maximum operation.’min’: reduction using the minimum operation. Default value is +name: "reduction" s: "none" type: STRING

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
  • updates (heterogeneous)T: Tensor of rank r >=1 (same rank and shape as indices)

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1 (same rank as input).

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxScatterElements_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatterElements_11(*args, **kwargs)#
+

Version

+

Onnx name: ScatterElements

+

This version of the operator has been available since +version 11.

+

Summary

+

ScatterElements takes three inputs data, updates, and indices of the same +rank r >= 1 and an optional attribute axis that identifies an axis of data +(by default, the outer-most axis, that is axis 0). The output of the operation +is produced by creating a copy of the input data, and then updating its value +to values specified by updates at specific index positions specified by +indices. Its output shape is the same as the shape of data.

+

For each entry in updates, the target index in data is obtained by combining +the corresponding entry in indices with the index of the entry itself: the +index-value for dimension = axis is obtained from the value of the corresponding +entry in indices and the index-value for dimension != axis is obtained from the +index of the entry itself.

+

For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry +is performed as below:

+
output[indices[i][j]][j] = updates[i][j] if axis = 0,
+output[i][indices[i][j]] = updates[i][j] if axis = 1,
+
+
+

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.

+

Example 1:

+
data = [
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+]
+indices = [
+    [1, 0, 2],
+    [0, 2, 1],
+]
+updates = [
+    [1.0, 1.1, 1.2],
+    [2.0, 2.1, 2.2],
+]
+output = [
+    [2.0, 1.1, 0.0]
+    [1.0, 0.0, 2.2]
+    [0.0, 2.1, 1.2]
+]
+
+
+

Example 2:

+
data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
+indices = [[1, 3]]
+updates = [[1.1, 2.1]]
+axis = 1
+output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
+
+
+

Attributes

+
    +
  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
  • updates (heterogeneous)T: Tensor of rank r >=1 (same rank and shape as indices)

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1 (same rank as input).

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxScatterElements_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatterElements_13(*args, **kwargs)#
+

Version

+

Onnx name: ScatterElements

+

This version of the operator has been available since +version 13.

+

Summary

+

ScatterElements takes three inputs data, updates, and indices of the same +rank r >= 1 and an optional attribute axis that identifies an axis of data +(by default, the outer-most axis, that is axis 0). The output of the operation +is produced by creating a copy of the input data, and then updating its value +to values specified by updates at specific index positions specified by +indices. Its output shape is the same as the shape of data.

+

For each entry in updates, the target index in data is obtained by combining +the corresponding entry in indices with the index of the entry itself: the +index-value for dimension = axis is obtained from the value of the corresponding +entry in indices and the index-value for dimension != axis is obtained from the +index of the entry itself.

+

For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry +is performed as below:

+
output[indices[i][j]][j] = updates[i][j] if axis = 0,
+output[i][indices[i][j]] = updates[i][j] if axis = 1,
+
+
+

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.

+

Example 1:

+
data = [
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+]
+indices = [
+    [1, 0, 2],
+    [0, 2, 1],
+]
+updates = [
+    [1.0, 1.1, 1.2],
+    [2.0, 2.1, 2.2],
+]
+output = [
+    [2.0, 1.1, 0.0]
+    [1.0, 0.0, 2.2]
+    [0.0, 2.1, 1.2]
+]
+
+
+

Example 2:

+
data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
+indices = [[1, 3]]
+updates = [[1.1, 2.1]]
+axis = 1
+output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
+
+
+

Attributes

+
    +
  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
  • updates (heterogeneous)T: Tensor of rank r >=1 (same rank and shape as indices)

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1 (same rank as input).

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxScatterElements_16#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatterElements_16(*args, **kwargs)#
+

Version

+

Onnx name: ScatterElements

+

This version of the operator has been available since +version 16.

+

Summary

+

ScatterElements takes three inputs data, updates, and indices of the same rank r >= 1 and an optional attribute axis that identifies an axis of data (by default, the outer-most axis, that is axis 0). The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data.

For each entry in updates, the target index in data is obtained by combining the corresponding entry in indices with the index of the entry itself: the index-value for dimension = axis is obtained from the value of the corresponding entry in indices and the index-value for dimension != axis is obtained from the index of the entry itself.

reduction allows specification of an optional reduction operation, which is applied to all values in updates tensor into output at the specified indices. In cases where reduction is set to “none”, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry is performed as below:

+
output[indices[i][j]][j] = updates[i][j] if axis = 0,
+output[i][indices[i][j]] = updates[i][j] if axis = 1,
+
+
+

When reduction is set to “add”, the update corresponding to the [i][j] entry is performed as below:

+
output[indices[i][j]][j] += updates[i][j] if axis = 0,
+output[i][indices[i][j]] += updates[i][j] if axis = 1,
+
+
+

When reduction is set to “mul”, the update corresponding to the [i][j] entry is performed as below:

+
output[indices[i][j]][j] *= updates[i][j] if axis = 0,
+output[i][indices[i][j]] *= updates[i][j] if axis = 1,
+
+
+

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.

Example 1:

+
data = [
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+]
+indices = [
+    [1, 0, 2],
+    [0, 2, 1],
+]
+updates = [
+    [1.0, 1.1, 1.2],
+    [2.0, 2.1, 2.2],
+]
+output = [
+    [2.0, 1.1, 0.0]
+    [1.0, 0.0, 2.2]
+    [0.0, 2.1, 1.2]
+]
+
+
+

Example 2:

+
data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
+indices = [[1, 3]]
+updates = [[1.1, 2.1]]
+axis = 1
+output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
+
+
+
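A short NumPy sketch (an illustrative assumption) of the reduction="mul" rule above, applied with axis=1 to the data of Example 2: each targeted position is multiplied by its update instead of being overwritten.

import numpy as np

data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]])
indices = np.array([[1, 3]])
updates = np.array([[1.1, 2.1]])

output = np.copy(data)
for i in range(indices.shape[0]):
    for j in range(indices.shape[1]):
        output[i, indices[i, j]] *= updates[i, j]   # f = multiplication
# output -> [[1.   2.2  3.   8.4  5.  ]]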

Attributes

+
    +
  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
  • reduction: Type of reduction to apply: none (default), add, mul. ‘none’: no reduction applied. ‘add’: reduction using the addition operation. ‘mul’: reduction using the multiplication operation. Default value is +name: "reduction" s: "none" type: STRING

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
  • updates (heterogeneous)T: Tensor of rank r >=1 (same rank and shape as indices)

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1 (same rank as input).

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxScatterElements_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatterElements_18(*args, **kwargs)#
+

Version

+

Onnx name: ScatterElements

+

This version of the operator has been available since +version 18.

+

Summary

+

ScatterElements takes three inputs data, updates, and indices of the same +rank r >= 1 and an optional attribute axis that identifies an axis of data +(by default, the outer-most axis, that is axis 0). The output of the operation +is produced by creating a copy of the input data, and then updating its value +to values specified by updates at specific index positions specified by +indices. Its output shape is the same as the shape of data.

+

For each entry in updates, the target index in data is obtained by combining +the corresponding entry in indices with the index of the entry itself: the +index-value for dimension = axis is obtained from the value of the corresponding +entry in indices and the index-value for dimension != axis is obtained from the +index of the entry itself.

+

reduction allows specification of an optional reduction operation, which is applied to all values in updates +tensor into output at the specified indices. +In cases where reduction is set to “none”, indices should not have duplicate entries: that is, if idx1 != idx2, +then indices[idx1] != indices[idx2]. For instance, in a 2-D tensor case, the update +corresponding to the [i][j] entry is performed as below:

+
output[indices[i][j]][j] = updates[i][j] if axis = 0,
+output[i][indices[i][j]] = updates[i][j] if axis = 1,
+
+
+

When reduction is set to some reduction function f, the update corresponding to the [i][j] entry is performed as below:

+
output[indices[i][j]][j] = f(output[indices[i][j]][j], updates[i][j]) if axis = 0,
output[i][indices[i][j]] = f(output[i][indices[i][j]], updates[i][j]) if axis = 1,
+
+
+

where the f is +, *, max or min as specified.

+

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.

+

(Opset 18 change): Adds max/min to the set of allowed reduction ops.

+

Example 1:

+
data = [
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+]
+indices = [
+    [1, 0, 2],
+    [0, 2, 1],
+]
+updates = [
+    [1.0, 1.1, 1.2],
+    [2.0, 2.1, 2.2],
+]
+output = [
+    [2.0, 1.1, 0.0]
+    [1.0, 0.0, 2.2]
+    [0.0, 2.1, 1.2]
+]
+
+
+

Example 2:

+
data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
+indices = [[1, 3]]
+updates = [[1.1, 2.1]]
+axis = 1
+output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
+
+
+

Attributes

+
    +
  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
  • reduction: Type of reduction to apply: none (default), add, mul, max, min. ‘none’: no reduction applied. ‘add’: reduction using the addition operation. ‘mul’: reduction using the multiplication operation.’max’: reduction using the maximum operation.’min’: reduction using the minimum operation. Default value is +name: "reduction" s: "none" type: STRING

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
  • updates (heterogeneous)T: Tensor of rank r >=1 (same rank and shape as indices)

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1 (same rank as input).

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxScatterND#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatterND(*args, **kwargs)#
+

Version

+

Onnx name: ScatterND

+

This version of the operator has been available since +version 18.

+

Summary

+

ScatterND takes three inputs data tensor of rank r >= 1, indices tensor of rank q >= 1, +and updates tensor of rank q + r - indices.shape[-1] - 1. The output of the operation +is produced by creating a copy of the input data, and then updating its value to values +specified by updates at specific index positions specified by indices. Its output shape +is the same as the shape of data.

+

indices is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of indices. +indices is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into data. +Hence, k can be a value at most the rank of data. When k equals rank(data), each update entry specifies an +update to a single element of the tensor. When k is less than rank(data) each update entry specifies an +update to a slice of the tensor. Index values are allowed to be negative, as per the usual +convention for counting backwards from the end, but are expected in the valid range.

+

updates is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the +first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. +The remaining dimensions of updates correspond to the dimensions of the +replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, +corresponding to the trailing (r-k) dimensions of data. Thus, the shape of updates +must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation +of shapes.

+

The output is calculated via the following equation:

+
output = np.copy(data)
+update_indices = indices.shape[:-1]
+for idx in np.ndindex(update_indices):
+    output[indices[idx]] = updates[idx]
+
+
+

The order of iteration in the above loop is not specified. +In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. +This ensures that the output value does not depend on the iteration order.

+

reduction allows specification of an optional reduction operation, which is applied to all values in updates +tensor into output at the specified indices. +In cases where reduction is set to “none”, indices should not have duplicate entries: that is, if idx1 != idx2, +then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order. +When reduction is set to some reduction function f, output is calculated as follows:

+
output = np.copy(data)
+update_indices = indices.shape[:-1]
+for idx in np.ndindex(update_indices):
+    output[indices[idx]] = f(output[indices[idx]], updates[idx])
+
+
+

where the f is +, *, max or min as specified.

+

This operator is the inverse of GatherND.

+

(Opset 18 change): Adds max/min to the set of allowed reduction ops.

+

Example 1:

+
data    = [1, 2, 3, 4, 5, 6, 7, 8]
+indices = [[4], [3], [1], [7]]
+updates = [9, 10, 11, 12]
+output  = [1, 11, 3, 10, 9, 6, 7, 12]
+
+
+

Example 2:

+
data    = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+            [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+            [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+            [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
+indices = [[0], [2]]
+updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+            [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]
+output  = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+            [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+            [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+            [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
+
+
+
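The equation above translates directly into a tiny NumPy sketch (an illustrative assumption, not the runtime implementation) that reproduces Example 1:

import numpy as np

def scatter_nd(data, indices, updates):
    output = np.copy(data)
    # each row of `indices` is a (partial) index into `data` selecting the
    # element or slice to be replaced by the matching entry of `updates`
    for idx in np.ndindex(indices.shape[:-1]):
        output[tuple(indices[idx])] = updates[idx]
    return output

data = np.array([1, 2, 3, 4, 5, 6, 7, 8])
indices = np.array([[4], [3], [1], [7]])
updates = np.array([9, 10, 11, 12])
print(scatter_nd(data, indices, updates))   # [ 1 11  3 10  9  6  7 12]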

Attributes

+
    +
  • reduction: Type of reduction to apply: none (default), add, mul, max, min. ‘none’: no reduction applied. ‘add’: reduction using the addition operation. ‘mul’: reduction using the multiplication operation. ‘max’: reduction using the maximum operation. ‘min’: reduction using the minimum operation. Default value is name: "reduction" s: "none" type: STRING

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1.

  • +
  • updates (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxScatterND_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatterND_11(*args, **kwargs)#
+

Version

+

Onnx name: ScatterND

+

This version of the operator has been available since +version 11.

+

Summary

+

ScatterND takes three inputs data tensor of rank r >= 1, indices tensor of rank q >= 1, +and updates tensor of rank q + r - indices.shape[-1] - 1. The output of the operation +is produced by creating a copy of the input data, and then updating its value to values +specified by updates at specific index positions specified by indices. Its output shape +is the same as the shape of data. Note that indices should not have duplicate entries. +That is, two or more updates for the same index-location is not supported.

+
+
indices is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of indices.

indices is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into data.

+
+
+

Hence, k can be a value at most the rank of data. When k equals rank(data), each update entry specifies an +update to a single element of the tensor. When k is less than rank(data) each update entry specifies an +update to a slice of the tensor. Index values are allowed to be negative, as per the usual +convention for counting backwards from the end, but are expected in the valid range.

+

updates is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the +first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. +The remaining dimensions of updates correspond to the dimensions of the +replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, +corresponding to the trailing (r-k) dimensions of data. Thus, the shape of updates +must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation +of shapes.

+

The output is calculated via the following equation:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] = updates[idx]

The order of iteration in the above loop is not specified. +In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. +This ensures that the output value does not depend on the iteration order.

+

This operator is the inverse of GatherND.

+

Example 1:

+
data    = [1, 2, 3, 4, 5, 6, 7, 8]
+indices = [[4], [3], [1], [7]]
+updates = [9, 10, 11, 12]
+output  = [1, 11, 3, 10, 9, 6, 7, 12]
+
+
+

Example 2:

+
data    = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
+indices = [[0], [2]]
+updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]
+output  = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
+
+
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1.

  • +
  • updates (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxScatterND_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatterND_13(*args, **kwargs)#
+

Version

+

Onnx name: ScatterND

+

This version of the operator has been available since +version 13.

+

Summary

+

ScatterND takes three inputs data tensor of rank r >= 1, indices tensor of rank q >= 1, +and updates tensor of rank q + r - indices.shape[-1] - 1. The output of the operation +is produced by creating a copy of the input data, and then updating its value to values +specified by updates at specific index positions specified by indices. Its output shape +is the same as the shape of data. Note that indices should not have duplicate entries. +That is, two or more updates for the same index-location is not supported.

+
+
indices is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of indices.

indices is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into data.

+
+
+

Hence, k can be a value at most the rank of data. When k equals rank(data), each update entry specifies an +update to a single element of the tensor. When k is less than rank(data) each update entry specifies an +update to a slice of the tensor. Index values are allowed to be negative, as per the usual +convention for counting backwards from the end, but are expected in the valid range.

+

updates is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the +first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. +The remaining dimensions of updates correspond to the dimensions of the +replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, +corresponding to the trailing (r-k) dimensions of data. Thus, the shape of updates +must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation +of shapes.

+

The output is calculated via the following equation:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] = updates[idx]

The order of iteration in the above loop is not specified. +In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. +This ensures that the output value does not depend on the iteration order.

+

This operator is the inverse of GatherND.

+

Example 1:

+
data    = [1, 2, 3, 4, 5, 6, 7, 8]
+indices = [[4], [3], [1], [7]]
+updates = [9, 10, 11, 12]
+output  = [1, 11, 3, 10, 9, 6, 7, 12]
+
+
+

Example 2:

+
data    = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
+indices = [[0], [2]]
+updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]
+output  = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
+
+
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1.

  • +
  • updates (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxScatterND_16#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatterND_16(*args, **kwargs)#
+

Version

+

Onnx name: ScatterND

+

This version of the operator has been available since +version 16.

+

Summary

+

ScatterND takes three inputs data tensor of rank r >= 1, indices tensor of rank q >= 1, +and updates tensor of rank q + r - indices.shape[-1] - 1. The output of the operation +is produced by creating a copy of the input data, and then updating its value to values +specified by updates at specific index positions specified by indices. Its output shape +is the same as the shape of data.

+
+
indices is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of indices.

indices is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into data.

+
+
+

Hence, k can be a value at most the rank of data. When k equals rank(data), each update entry specifies an +update to a single element of the tensor. When k is less than rank(data) each update entry specifies an +update to a slice of the tensor. Index values are allowed to be negative, as per the usual +convention for counting backwards from the end, but are expected in the valid range.

+

updates is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the +first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. +The remaining dimensions of updates correspond to the dimensions of the +replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, +corresponding to the trailing (r-k) dimensions of data. Thus, the shape of updates +must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation +of shapes.

+
The output is calculated via the following equation:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] = updates[idx]

The order of iteration in the above loop is not specified. In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order.

reduction allows specification of an optional reduction operation, which is applied to all values in updates tensor into output at the specified indices. In cases where reduction is set to “none”, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order. When reduction is set to “add”, output is calculated as follows:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] += updates[idx]

When reduction is set to “mul”, output is calculated as follows:

output = np.copy(data)
update_indices = indices.shape[:-1]
for idx in np.ndindex(update_indices):
    output[indices[idx]] *= updates[idx]
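Both reduction variants can be checked with a small NumPy sketch (an illustrative assumption): with a reduction, duplicate indices are allowed and their updates accumulate instead of overwriting each other.

import numpy as np

def scatter_nd_add(data, indices, updates):
    output = np.copy(data)
    for idx in np.ndindex(indices.shape[:-1]):
        output[tuple(indices[idx])] += updates[idx]   # reduction = "add"
    return output

data = np.zeros(5)
indices = np.array([[1], [1], [3]])    # index 1 appears twice
updates = np.array([10.0, 5.0, 2.0])
print(scatter_nd_add(data, indices, updates))   # [ 0. 15.  0.  2.  0.]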

This operator is the inverse of GatherND.

Example 1:

+
data    = [1, 2, 3, 4, 5, 6, 7, 8]
+indices = [[4], [3], [1], [7]]
+updates = [9, 10, 11, 12]
+output  = [1, 11, 3, 10, 9, 6, 7, 12]
+
+
+

Example 2:

+
data    = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
+indices = [[0], [2]]
+updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]
+output  = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+           [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+           [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+           [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
+
+
+

Attributes

+
    +
  • reduction: Type of reduction to apply: none (default), add, mul. ‘none’: no reduction applied. ‘add’: reduction using the addition operation. ‘mul’: reduction using the multiplication operation. Default value is +name: "reduction" s: "none" type: STRING

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1.

  • +
  • updates (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxScatterND_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatterND_18(*args, **kwargs)#
+

Version

+

Onnx name: ScatterND

+

This version of the operator has been available since +version 18.

+

Summary

+

ScatterND takes three inputs data tensor of rank r >= 1, indices tensor of rank q >= 1, +and updates tensor of rank q + r - indices.shape[-1] - 1. The output of the operation +is produced by creating a copy of the input data, and then updating its value to values +specified by updates at specific index positions specified by indices. Its output shape +is the same as the shape of data.

+

indices is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of indices. +indices is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into data. +Hence, k can be a value at most the rank of data. When k equals rank(data), each update entry specifies an +update to a single element of the tensor. When k is less than rank(data) each update entry specifies an +update to a slice of the tensor. Index values are allowed to be negative, as per the usual +convention for counting backwards from the end, but are expected in the valid range.

+

updates is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the +first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. +The remaining dimensions of updates correspond to the dimensions of the +replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, +corresponding to the trailing (r-k) dimensions of data. Thus, the shape of updates +must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation +of shapes.

+

The output is calculated via the following equation:

+
output = np.copy(data)
+update_indices = indices.shape[:-1]
+for idx in np.ndindex(update_indices):
+    output[indices[idx]] = updates[idx]
+
+
+

The order of iteration in the above loop is not specified. +In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. +This ensures that the output value does not depend on the iteration order.

+

reduction allows specification of an optional reduction operation, which is applied to all values in updates +tensor into output at the specified indices. +In cases where reduction is set to “none”, indices should not have duplicate entries: that is, if idx1 != idx2, +then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order. +When reduction is set to some reduction function f, output is calculated as follows:

+
output = np.copy(data)
+update_indices = indices.shape[:-1]
+for idx in np.ndindex(update_indices):
+    output[indices[idx]] = f(output[indices[idx]], updates[idx])
+
+
+

where the f is +, *, max or min as specified.

+

This operator is the inverse of GatherND.

+

(Opset 18 change): Adds max/min to the set of allowed reduction ops.

+

Example 1:

+
data    = [1, 2, 3, 4, 5, 6, 7, 8]
+indices = [[4], [3], [1], [7]]
+updates = [9, 10, 11, 12]
+output  = [1, 11, 3, 10, 9, 6, 7, 12]
+
+
+

Example 2:

+
data    = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+            [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+            [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+            [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
+indices = [[0], [2]]
+updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+            [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]
+output  = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+            [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+            [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+            [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]
+
+
+

Attributes

+
    +
  • reduction: Type of reduction to apply: none (default), add, mul, max, min. ‘none’: no reduction applied. ‘add’: reduction using the addition operation. ‘mul’: reduction using the multiplication operation. ‘max’: reduction using the maximum operation. ‘min’: reduction using the minimum operation. Default value is name: "reduction" s: "none" type: STRING

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)tensor(int64): Tensor of rank q >= 1.

  • +
  • updates (heterogeneous)T: Tensor of rank q + r - indices_shape[-1] - 1.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxScatter_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatter_11(*args, **kwargs)#
+

Version

+

Onnx name: Scatter

+

This version of the operator has been deprecated since +version 11.

+

Summary

+

This operator is deprecated. Please use ScatterElements, which provides the same functionality.

+

Scatter takes three inputs data, updates, and indices of the same +rank r >= 1 and an optional attribute axis that identifies an axis of data +(by default, the outer-most axis, that is axis 0). The output of the operation +is produced by creating a copy of the input data, and then updating its value +to values specified by updates at specific index positions specified by +indices. Its output shape is the same as the shape of data.

+

For each entry in updates, the target index in data is obtained by combining +the corresponding entry in indices with the index of the entry itself: the +index-value for dimension = axis is obtained from the value of the corresponding +entry in indices and the index-value for dimension != axis is obtained from the +index of the entry itself.

+

For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry +is performed as below:

+
output[indices[i][j]][j] = updates[i][j] if axis = 0,
+output[i][indices[i][j]] = updates[i][j] if axis = 1,
+
+
+

This operator is the inverse of GatherElements. It is similar to Torch’s Scatter operation.

+

Example 1:

+
data = [
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0],
+]
+indices = [
+    [1, 0, 2],
+    [0, 2, 1],
+]
+updates = [
+    [1.0, 1.1, 1.2],
+    [2.0, 2.1, 2.2],
+]
+output = [
+    [2.0, 1.1, 0.0],
+    [1.0, 0.0, 2.2],
+    [0.0, 2.1, 1.2],
+]
+
+
+

Example 2:

+
data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
+indices = [[1, 3]]
+updates = [[1.1, 2.1]]
+axis = 1
+output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
+
+
+
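
As an illustration only (scatter_along_axis below is a hypothetical NumPy helper, not an skl2onnx function), the axis-wise update rule above can be sketched and checked against Example 2:

import numpy as np

def scatter_along_axis(data, indices, updates, axis=0):
    # For each entry of updates, replace the coordinate along `axis` by the
    # corresponding entry of indices; keep the other coordinates as-is.
    output = np.copy(data)
    for i in np.ndindex(indices.shape):
        target = list(i)
        target[axis] = indices[i]
        output[tuple(target)] = updates[i]
    return output

data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]])
indices = np.array([[1, 3]])
updates = np.array([[1.1, 2.1]])
print(scatter_along_axis(data, indices, updates, axis=1))
# [[1.  1.1 3.  2.1 5. ]]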

Attributes

+
    +
  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.

  • +
  • updates (heterogeneous)T: Tensor of rank r >=1 (same rank and shape as indices)

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1 (same rank as input).

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxScatter_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxScatter_9(*args, **kwargs)#
+

Version

+

Onnx name: Scatter

+

This version of the operator has been available since +version 9.

+

Summary

+

Given data, updates and indices input tensors of rank r >= 1, write the values provided by updates +into the first input, data, along axis dimension of data (by default outer-most one as axis=0) at corresponding indices. +For each entry in updates, the target index in data is specified by corresponding entry in indices +for dimension = axis, and index in source for dimension != axis. For instance, in a 2-D tensor case, +data[indices[i][j]][j] = updates[i][j] if axis = 0, or data[i][indices[i][j]] = updates[i][j] if axis = 1, +where i and j are loop counters from 0 up to the respective size in updates - 1. +Example 1:

+
+
+
data = [
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
]
indices = [
    [1, 0, 2],
    [0, 2, 1],
]
updates = [
    [1.0, 1.1, 1.2],
    [2.0, 2.1, 2.2],
]
output = [
    [2.0, 1.1, 0.0],
    [1.0, 0.0, 2.2],
    [0.0, 2.1, 1.2],
]

Example 2:

data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
indices = [[1, 3]]
updates = [[1.1, 2.1]]
axis = 1
output = [[1.0, 1.1, 3.0, 2.1, 5.0]]

+
+
+

Attributes

+
    +
  • axis: Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of rank r >= 1.

  • +
  • indices (heterogeneous)Tind: Tensor of int32/int64 indices, of r >= 1 (same rank as input).

  • +
  • updates (heterogeneous)T: Tensor of rank r >=1 (same rank and shape as indices)

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of rank r >= 1 (same rank as input).

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input and output types can be of any tensor type.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxSelu#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSelu(*args, **kwargs)#
+

Version

+

Onnx name: Selu

+

This version of the operator has been available since +version 6.

+

Summary

+

Selu takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the scaled exponential linear unit function, +y = gamma * (alpha * e^x - alpha) for x <= 0, y = gamma * x for x > 0, +is applied to the tensor elementwise.

+
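
A minimal NumPy sketch of the element-wise formula above, using the default alpha and gamma listed in the attributes below (illustration only, not an skl2onnx API):

import numpy as np

def selu(x, alpha=1.6732631921768188, gamma=1.0507010221481323):
    # y = gamma * (alpha * e^x - alpha) for x <= 0, y = gamma * x for x > 0
    x = np.asarray(x, dtype=np.float32)
    return np.where(x > 0, gamma * x, gamma * (alpha * np.exp(x) - alpha))

print(selu([-1.0, 0.0, 2.0]))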

Attributes

+
    +
  • alpha: Coefficient of SELU default to 1.67326319217681884765625 (i.e., float32 approximation of 1.6732632423543772848170429916717). Default value is +name: "alpha" f: 1.6732631921768188 type: FLOAT

  • +
  • gamma: Coefficient of SELU default to 1.05070102214813232421875 (i.e., float32 approximation of 1.0507009873554804934193349852946). Default value is +name: "gamma" f: 1.0507010221481323 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSelu_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSelu_1(*args, **kwargs)#
+

Version

+

Onnx name: Selu

+

This version of the operator has been available since +version 1.

+

Summary

+

Selu takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the scaled exponential linear unit function, +y = gamma * (alpha * e^x - alpha) for x <= 0, y = gamma * x for x > 0, +is applied to the tensor elementwise.

+

Attributes

+
    +
  • alpha: Coefficient of SELU default to 1.6732. Default value is +name: "alpha" f: 1.673200011253357 type: FLOAT

  • +
  • +
  • gamma: Coefficient of SELU default to 1.0507. Default value is +name: "gamma" f: 1.0506999492645264 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSelu_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSelu_6(*args, **kwargs)#
+

Version

+

Onnx name: Selu

+

This version of the operator has been available since +version 6.

+

Summary

+

Selu takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the scaled exponential linear unit function, +y = gamma * (alpha * e^x - alpha) for x <= 0, y = gamma * x for x > 0, +is applied to the tensor elementwise.

+

Attributes

+
    +
  • alpha: Coefficient of SELU default to 1.67326319217681884765625 (i.e., float32 approximation of 1.6732632423543772848170429916717). Default value is +name: "alpha" f: 1.6732631921768188 type: FLOAT

  • +
  • gamma: Coefficient of SELU default to 1.05070102214813232421875 (i.e., float32 approximation of 1.0507009873554804934193349852946). Default value is +name: "gamma" f: 1.0507010221481323 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSequenceAt#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceAt(*args, **kwargs)#
+

Version

+

Onnx name: SequenceAt

+

This version of the operator has been available since +version 11.

+

Summary

+

Outputs a tensor copy from the tensor at ‘position’ in ‘input_sequence’. +Accepted range for ‘position’ is in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. +Negative value means counting positions from the back.

+

Inputs

+
    +
  • input_sequence (heterogeneous)S: Input sequence.

  • +
  • position (heterogeneous)I: Position of the tensor in the sequence. Negative value means counting positions from the back. Accepted range in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).

  • +
+

Outputs

+
    +
  • tensor (heterogeneous)T: Output tensor at the specified position in the input sequence.

  • +
+

Type Constraints

+
    +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • +
  • I tensor(int32), tensor(int64): Constrain position to integral tensor. It must be a scalar(tensor of empty shape).

  • +
+
+ +
+
+
+
+

OnnxSequenceAt_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceAt_11(*args, **kwargs)#
+

Version

+

Onnx name: SequenceAt

+

This version of the operator has been available since +version 11.

+

Summary

+

Outputs a tensor copy from the tensor at ‘position’ in ‘input_sequence’. +Accepted range for ‘position’ is in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. +Negative value means counting positions from the back.

+

Inputs

+
    +
  • input_sequence (heterogeneous)S: Input sequence.

  • +
  • position (heterogeneous)I: Position of the tensor in the sequence. Negative value means counting positions from the back. Accepted range in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).

  • +
+

Outputs

+
    +
  • tensor (heterogeneous)T: Output tensor at the specified position in the input sequence.

  • +
+

Type Constraints

+
    +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • +
  • I tensor(int32), tensor(int64): Constrain position to integral tensor. It must be a scalar(tensor of empty shape).

  • +
+
+ +
+
+
+
+

OnnxSequenceConstruct#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceConstruct(*args, **kwargs)#
+

Version

+

Onnx name: SequenceConstruct

+

This version of the operator has been available since +version 11.

+

Summary

+

Construct a tensor sequence containing ‘inputs’ tensors. +All tensors in ‘inputs’ must have the same data type.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • inputs (variadic, heterogeneous)T: Tensors.

  • +
+

Outputs

+
    +
  • output_sequence (heterogeneous)S: Sequence enclosing the input tensors.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input types to any tensor type.

  • +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxSequenceConstruct_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceConstruct_11(*args, **kwargs)#
+

Version

+

Onnx name: SequenceConstruct

+

This version of the operator has been available since +version 11.

+

Summary

+

Construct a tensor sequence containing ‘inputs’ tensors. +All tensors in ‘inputs’ must have the same data type.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • inputs (variadic, heterogeneous)T: Tensors.

  • +
+

Outputs

+
    +
  • output_sequence (heterogeneous)S: Sequence enclosing the input tensors.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input types to any tensor type.

  • +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxSequenceEmpty#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceEmpty(*args, **kwargs)#
+

Version

+

Onnx name: SequenceEmpty

+

This version of the operator has been available since +version 11.

+

Summary

+

Construct an empty tensor sequence, with given data type.

+

Attributes

+
    +
  • +
+

Outputs

+
    +
  • output (heterogeneous)S: Empty sequence.

  • +
+

Type Constraints

+
    +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxSequenceEmpty_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceEmpty_11(*args, **kwargs)#
+

Version

+

Onnx name: SequenceEmpty

+

This version of the operator has been available since +version 11.

+

Summary

+

Construct an empty tensor sequence, with given data type.

+

Attributes

+
    +
  • +
+

Outputs

+
    +
  • output (heterogeneous)S: Empty sequence.

  • +
+

Type Constraints

+
    +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output types to any tensor type.

  • +
+
+ +
+
+
+
+

OnnxSequenceErase#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceErase(*args, **kwargs)#
+

Version

+

Onnx name: SequenceErase

+

This version of the operator has been available since +version 11.

+

Summary

+

Outputs a tensor sequence that removes the tensor at ‘position’ from ‘input_sequence’. +Accepted range for ‘position’ is in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. +Negative value means counting positions from the back. +‘position’ is optional, by default it erases the last tensor from ‘input_sequence’.

+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • input_sequence (heterogeneous)S: Input sequence.

  • +
  • position (optional, heterogeneous)I: Position of the tensor in the sequence. Negative value means counting positions from the back. Accepted range in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).

  • +
+

Outputs

+
    +
  • output_sequence (heterogeneous)S: Output sequence that has the tensor at the specified position removed.

  • +
+

Type Constraints

+
    +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • +
  • I tensor(int32), tensor(int64): Constrain position to integral tensor. It must be a scalar(tensor of empty shape).

  • +
+
+ +
+
+
+
+

OnnxSequenceErase_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceErase_11(*args, **kwargs)#
+

Version

+

Onnx name: SequenceErase

+

This version of the operator has been available since +version 11.

+

Summary

+

Outputs a tensor sequence that removes the tensor at ‘position’ from ‘input_sequence’. +Accepted range for ‘position’ is in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. +Negative value means counting positions from the back. +‘position’ is optional, by default it erases the last tensor from ‘input_sequence’.

+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • input_sequence (heterogeneous)S: Input sequence.

  • +
  • position (optional, heterogeneous)I: Position of the tensor in the sequence. Negative value means counting positions from the back. Accepted range in [-n, n - 1], where n is the number of tensors in ‘input_sequence’. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).

  • +
+

Outputs

+
    +
  • output_sequence (heterogeneous)S: Output sequence that has the tensor at the specified position removed.

  • +
+

Type Constraints

+
    +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • +
  • I tensor(int32), tensor(int64): Constrain position to integral tensor. It must be a scalar(tensor of empty shape).

  • +
+
+ +
+
+
+
+

OnnxSequenceInsert#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceInsert(*args, **kwargs)#
+

Version

+

Onnx name: SequenceInsert

+

This version of the operator has been available since +version 11.

+

Summary

+

Outputs a tensor sequence that inserts ‘tensor’ into ‘input_sequence’ at ‘position’. +‘tensor’ must have the same data type as ‘input_sequence’. +Accepted range for ‘position’ is in [-n, n], where n is the number of tensors in ‘input_sequence’. +Negative value means counting positions from the back. +‘position’ is optional, by default it inserts ‘tensor’ to the back of ‘input_sequence’.

+
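
Modelling a tensor sequence as a plain Python list of arrays, the insertion semantics can be sketched as follows (illustration only; sequence_insert is a hypothetical helper, not an skl2onnx function):

import numpy as np

def sequence_insert(input_sequence, tensor, position=None):
    # 'position' is accepted in [-n, n]; by default the tensor is appended at the back.
    out = list(input_sequence)
    n = len(out)
    if position is None:
        position = n
    if not -n <= position <= n:
        raise IndexError("position out of bounds")
    out.insert(position, tensor)
    return out

seq = [np.zeros(2), np.ones(2)]
print(len(sequence_insert(seq, np.full(2, 2.0))))   # 3, appended at the back
print(sequence_insert(seq, np.full(2, 2.0), 0)[0])  # [2. 2.]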

Inputs

+

Between 2 and 3 inputs.

+
    +
  • input_sequence (heterogeneous)S: Input sequence.

  • +
  • tensor (heterogeneous)T: Input tensor to be inserted into the input sequence.

  • +
  • position (optional, heterogeneous)I: Position in the sequence where the new tensor is inserted. It is optional and default is to insert to the back of the sequence. Negative value means counting positions from the back. Accepted range in [-n, n], where n is the number of tensors in ‘input_sequence’. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).

  • +
+

Outputs

+
    +
  • output_sequence (heterogeneous)S: Output sequence that contains the inserted tensor at given position.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • +
  • I tensor(int32), tensor(int64): Constrain position to integral tensor. It must be a scalar(tensor of empty shape).

  • +
+
+ +
+
+
+
+

OnnxSequenceInsert_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceInsert_11(*args, **kwargs)#
+

Version

+

Onnx name: SequenceInsert

+

This version of the operator has been available since +version 11.

+

Summary

+

Outputs a tensor sequence that inserts ‘tensor’ into ‘input_sequence’ at ‘position’. +‘tensor’ must have the same data type as ‘input_sequence’. +Accepted range for ‘position’ is in [-n, n], where n is the number of tensors in ‘input_sequence’. +Negative value means counting positions from the back. +‘position’ is optional, by default it inserts ‘tensor’ to the back of ‘input_sequence’.

+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • input_sequence (heterogeneous)S: Input sequence.

  • +
  • tensor (heterogeneous)T: Input tensor to be inserted into the input sequence.

  • +
  • position (optional, heterogeneous)I: Position in the sequence where the new tensor is inserted. It is optional and default is to insert to the back of the sequence. Negative value means counting positions from the back. Accepted range in [-n, n], where n is the number of tensors in ‘input_sequence’. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).

  • +
+

Outputs

+
    +
  • output_sequence (heterogeneous)S: Output sequence that contains the inserted tensor at given position.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain to any tensor type.

  • +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • +
  • I tensor(int32), tensor(int64): Constrain position to integral tensor. It must be a scalar(tensor of empty shape).

  • +
+
+ +
+
+
+
+

OnnxSequenceLength#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceLength(*args, **kwargs)#
+

Version

+

Onnx name: SequenceLength

+

This version of the operator has been available since +version 11.

+

Summary

+

Produces a scalar(tensor of empty shape) containing the number of tensors in ‘input_sequence’.

+

Inputs

+
    +
  • input_sequence (heterogeneous)S: Input sequence.

  • +
+

Outputs

+
    +
  • length (heterogeneous)I: Length of input sequence. It must be a scalar(tensor of empty shape).

  • +
+

Type Constraints

+
    +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • +
  • I tensor(int64): Constrain output to integral tensor. It must be a scalar(tensor of empty shape).

  • +
+
+ +
+
+
+
+

OnnxSequenceLength_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceLength_11(*args, **kwargs)#
+

Version

+

Onnx name: SequenceLength

+

This version of the operator has been available since +version 11.

+

Summary

+

Produces a scalar(tensor of empty shape) containing the number of tensors in ‘input_sequence’.

+

Inputs

+
    +
  • input_sequence (heterogeneous)S: Input sequence.

  • +
+

Outputs

+
    +
  • length (heterogeneous)I: Length of input sequence. It must be a scalar(tensor of empty shape).

  • +
+

Type Constraints

+
    +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor type.

  • +
  • I tensor(int64): Constrain output to integral tensor. It must be a scalar(tensor of empty shape).

  • +
+
+ +
+
+
+
+

OnnxSequenceMap#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceMap(*args, **kwargs)#
+

Version

+

Onnx name: SequenceMap

+

This version of the operator has been available since +version 17.

+

Summary

+

Applies a sub-graph to each sample in the input sequence(s).

+

Inputs can be either tensors or sequences, with the exception of the first input, which must be a sequence. The length of the first input sequence determines the number of samples in the outputs. Any other sequence inputs should have the same number of samples. The number of inputs and outputs should match that of the subgraph.

+

For each i-th element in the output, a sample will be extracted from the input sequence(s) at +the i-th position and the sub-graph will be applied to it. +The outputs will contain the outputs of the sub-graph for each sample, in the same order as in +the input.

+

This operator assumes that processing each sample is independent and could be executed in parallel or in any order. Users cannot expect any specific ordering in which each subgraph is computed.

+
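
For intuition only, if the sub-graph were a simple element-wise doubling, SequenceMap behaves like a per-sample map over the sequence (body below is a stand-in for the sub-graph, not an skl2onnx API):

import numpy as np

def body(x):
    # Stand-in for the sub-graph applied independently to each sample.
    return 2 * x

input_sequence = [np.arange(3), np.arange(5)]
out_sequence = [body(sample) for sample in input_sequence]
print([t.tolist() for t in out_sequence])  # [[0, 2, 4], [0, 2, 4, 6, 8]]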

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • input_sequence (heterogeneous)S: Input sequence.

  • +
  • additional_inputs (variadic)V: Additional inputs to the graph

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • out_sequence (variadic)S: Output sequence(s)

  • +
+

Type Constraints

+
    +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input types to any sequence type.

  • +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor or sequence type.

  • +
+
+ +
+
+
+
+

OnnxSequenceMap_17#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSequenceMap_17(*args, **kwargs)#
+

Version

+

Onnx name: SequenceMap

+

This version of the operator has been available since +version 17.

+

Summary

+

Applies a sub-graph to each sample in the input sequence(s).

+

Inputs can be either tensors or sequences, with the exception of the first input, which must be a sequence. The length of the first input sequence determines the number of samples in the outputs. Any other sequence inputs should have the same number of samples. The number of inputs and outputs should match that of the subgraph.

+

For each i-th element in the output, a sample will be extracted from the input sequence(s) at +the i-th position and the sub-graph will be applied to it. +The outputs will contain the outputs of the sub-graph for each sample, in the same order as in +the input.

+

This operator assumes that processing each sample is independent and could be executed in parallel or in any order. Users cannot expect any specific ordering in which each subgraph is computed.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • input_sequence (heterogeneous)S: Input sequence.

  • +
  • additional_inputs (variadic)V: Additional inputs to the graph

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • out_sequence (variadic)S: Output sequence(s)

  • +
+

Type Constraints

+
    +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain input types to any sequence type.

  • +
  • V tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128), seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain to any tensor or sequence type.

  • +
+
+ +
+
+
+
+

OnnxShape#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxShape(*args, **kwargs)#
+

Version

+

Onnx name: Shape

+

This version of the operator has been available since +version 15.

+

Summary

+

Takes a tensor as input and outputs a 1D int64 tensor containing the shape of the input tensor. Optional attributes start and end can be used to compute a slice of the input tensor’s shape. If the start axis is omitted, the slice starts from axis 0. The end axis, if specified, is exclusive (and the returned value will not include the size of that axis). If the end axis is omitted, the axes up to the last one will be included. Negative axes indicate counting back from the last axis. Note that axes will be clamped to the range [0, r-1], where r is the rank of the input tensor, if they are out-of-range (after adding r in the case of a negative axis). Thus, specifying any end value > r is equivalent to specifying an end value of r, and specifying any start value < -r is equivalent to specifying a start value of 0.

+

Examples:

+
Input tensor with shape: [2, 3, 4]
+No attributes specified.
+Output: [2, 3, 4]
+
+
+
Input tensor with shape: [2, 3, 4]
+start: -1
+Output: [4]
+
+
+
Input tensor with shape: [2, 3, 4]
+end: -1
+Output: [2, 3]
+
+
+
Input tensor with shape: [2, 3, 4]
+start: 1
+end: 2
+Output: [3]
+
+
+
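
The slicing and clamping rules above can be sketched with NumPy as follows (shape_op is a hypothetical reference helper, not an skl2onnx API); it reproduces the four examples:

import numpy as np

def shape_op(data, start=0, end=None):
    # Clamp start/end into [0, r] after adding r to negative values.
    r = data.ndim
    if start < 0:
        start += r
    start = min(max(start, 0), r)
    if end is None:
        end = r
    else:
        if end < 0:
            end += r
        end = min(max(end, 0), r)
    return np.array(data.shape[start:end], dtype=np.int64)

x = np.zeros((2, 3, 4))
print(shape_op(x))                  # [2 3 4]
print(shape_op(x, start=-1))        # [4]
print(shape_op(x, end=-1))          # [2 3]
print(shape_op(x, start=1, end=2))  # [3]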

Attributes

+
    +
  • +
  • start: (Optional) Starting axis for slicing the shape. Default value is 0. Negative value means counting dimensions from the back. Default value is +name: "start" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • shape (heterogeneous)T1: Shape of the input tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • +
  • T1 tensor(int64): Constrain output to int64 tensor.

  • +
+
+ +
+
+
+
+

OnnxShape_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxShape_1(*args, **kwargs)#
+

Version

+

Onnx name: Shape

+

This version of the operator has been available since +version 1.

+

Summary

+

Takes a tensor as input and outputs a 1D int64 tensor containing the shape of the input tensor.

+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • shape (heterogeneous)T1: Shape of the input tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • +
  • T1 tensor(int64): Constrain output to int64 tensor.

  • +
+
+ +
+
+
+
+

OnnxShape_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxShape_13(*args, **kwargs)#
+

Version

+

Onnx name: Shape

+

This version of the operator has been available since +version 13.

+

Summary

+

Takes a tensor as input and outputs a 1D int64 tensor containing the shape of the input tensor.

+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • shape (heterogeneous)T1: Shape of the input tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • +
  • T1 tensor(int64): Constrain output to int64 tensor.

  • +
+
+ +
+
+
+
+

OnnxShape_15#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxShape_15(*args, **kwargs)#
+

Version

+

Onnx name: Shape

+

This version of the operator has been available since +version 15.

+

Summary

+

Takes a tensor as input and outputs a 1D int64 tensor containing the shape of the input tensor. Optional attributes start and end can be used to compute a slice of the input tensor’s shape. If the start axis is omitted, the slice starts from axis 0. The end axis, if specified, is exclusive (and the returned value will not include the size of that axis). If the end axis is omitted, the axes up to the last one will be included. Negative axes indicate counting back from the last axis. Note that axes will be clamped to the range [0, r-1], where r is the rank of the input tensor, if they are out-of-range (after adding r in the case of a negative axis). Thus, specifying any end value > r is equivalent to specifying an end value of r, and specifying any start value < -r is equivalent to specifying a start value of 0.

+

Examples:

+
Input tensor with shape: [2, 3, 4]
+No attributes specified.
+Output: [2, 3, 4]
+
+
+
Input tensor with shape: [2, 3, 4]
+start: -1
+Output: [4]
+
+
+
Input tensor with shape: [2, 3, 4]
+end: -1
+Output: [2, 3]
+
+
+
Input tensor with shape: [2, 3, 4]
+start: 1
+end: 2
+Output: [3]
+
+
+

Attributes

+
    +
  • +
  • start: (Optional) Starting axis for slicing the shape. Default value is 0. Negative value means counting dimensions from the back. Default value is +name: "start" i: 0 type: INT

  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • shape (heterogeneous)T1: Shape of the input tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • +
  • T1 tensor(int64): Constrain output to int64 tensor.

  • +
+
+ +
+
+
+
+

OnnxShrink#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxShrink(*args, **kwargs)#
+

Version

+

Onnx name: Shrink

+

This version of the operator has been available since +version 9.

+

Summary

+

Shrink takes one input data (Tensor&lt;numeric&gt;) and produces one Tensor output, having the same datatype and shape as the input. It has two attributes, lambd and bias. The formula of this operator is: If x < -lambd, y = x + bias; If x > lambd, y = x - bias; Otherwise, y = 0.

+
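
A minimal NumPy sketch of the formula above (illustration only; shrink is a hypothetical helper, not an skl2onnx API):

import numpy as np

def shrink(x, bias=0.0, lambd=0.5):
    # If x < -lambd: y = x + bias; if x > lambd: y = x - bias; otherwise y = 0.
    x = np.asarray(x, dtype=np.float32)
    return np.where(x < -lambd, x + bias, np.where(x > lambd, x - bias, 0.0))

print(shrink([-2.0, -0.25, 0.0, 0.3, 1.5], bias=1.0))  # [-1.  0.  0.  0.  0.5]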

Attributes

+
    +
  • bias: The bias value added to output. Default is 0. Default value is +name: "bias" f: 0.0 type: FLOAT

  • +
  • lambd: The lambd value for the Shrink formulation. Default is 0.5. Default value is +name: "lambd" f: 0.5 type: FLOAT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: The input data as Tensor.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The output.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input to only numeric types.

  • +
+
+ +
+
+
+
+

OnnxShrink_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxShrink_9(*args, **kwargs)#
+

Version

+

Onnx name: Shrink

+

This version of the operator has been available since +version 9.

+

Summary

+

Shrink takes one input data (Tensor&lt;numeric&gt;) and produces one Tensor output, having the same datatype and shape as the input. It has two attributes, lambd and bias. The formula of this operator is: If x < -lambd, y = x + bias; If x > lambd, y = x - bias; Otherwise, y = 0.

+

Attributes

+
    +
  • bias: The bias value added to output. Default is 0. Default value is +name: "bias" f: 0.0 type: FLOAT

  • +
  • lambd: The lambd value for the Shrink formulation. Default is 0.5. Default value is +name: "lambd" f: 0.5 type: FLOAT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: The input data as Tensor.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The output.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input to only numeric types.

  • +
+
+ +
+
+
+
+

OnnxSigmoid#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSigmoid(*args, **kwargs)#
+

Version

+

Onnx name: Sigmoid

+

This version of the operator has been available since +version 13.

+

Summary

+

Sigmoid takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the +tensor elementwise.

+
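
For reference, the element-wise formula above in NumPy (illustration only):

import numpy as np

def sigmoid(x):
    # y = 1 / (1 + exp(-x)), applied element-wise.
    return 1.0 / (1.0 + np.exp(-np.asarray(x, dtype=np.float32)))

print(sigmoid([-1.0, 0.0, 1.0]))  # approximately [0.269 0.5 0.731]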

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSigmoid_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSigmoid_1(*args, **kwargs)#
+

Version

+

Onnx name: Sigmoid

+

This version of the operator has been available since +version 1.

+

Summary

+

Sigmoid takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the +tensor elementwise.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSigmoid_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSigmoid_13(*args, **kwargs)#
+

Version

+

Onnx name: Sigmoid

+

This version of the operator has been available since +version 13.

+

Summary

+

Sigmoid takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the +tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSigmoid_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSigmoid_6(*args, **kwargs)#
+

Version

+

Onnx name: Sigmoid

+

This version of the operator has been available since +version 6.

+

Summary

+

Sigmoid takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the +tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSign#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSign(*args, **kwargs)#
+

Version

+

Onnx name: Sign

+

This version of the operator has been available since +version 13.

+

Summary

+

Calculate the sign of the given input tensor element-wise. If input > 0, output 1. If input < 0, output -1. If input == 0, output 0.

+
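
This matches NumPy’s np.sign; a quick illustration:

import numpy as np

x = np.array([-3.0, 0.0, 2.5])
print(np.sign(x))  # [-1.  0.  1.], following the element-wise rule above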

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The sign of the input tensor computed element-wise. It has the same shape and type of the input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxSign_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSign_13(*args, **kwargs)#
+

Version

+

Onnx name: Sign

+

This version of the operator has been available since +version 13.

+

Summary

+

Calculate the sign of the given input tensor element-wise. If input > 0, output 1. If input < 0, output -1. If input == 0, output 0.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The sign of the input tensor computed element-wise. It has the same shape and type of the input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxSign_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSign_9(*args, **kwargs)#
+

Version

+

Onnx name: Sign

+

This version of the operator has been available since +version 9.

+

Summary

+

Calculate the sign of the given input tensor element-wise. If input > 0, output 1. If input < 0, output -1. If input == 0, output 0.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The sign of the input tensor computed element-wise. It has the same shape and type of the input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxSin#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSin(*args, **kwargs)#
+

Version

+

Onnx name: Sin

+

This version of the operator has been available since +version 7.

+

Summary

+

Calculates the sine of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The sine of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSin_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSin_7(*args, **kwargs)#
+

Version

+

Onnx name: Sin

+

This version of the operator has been available since +version 7.

+

Summary

+

Calculates the sine of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The sine of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSinh#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSinh(*args, **kwargs)#
+

Version

+

Onnx name: Sinh

+

This version of the operator has been available since +version 9.

+

Summary

+

Calculates the hyperbolic sine of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic sine values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSinh_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSinh_9(*args, **kwargs)#
+

Version

+

Onnx name: Sinh

+

This version of the operator has been available since +version 9.

+

Summary

+

Calculates the hyperbolic sine of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic sine values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSize#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSize(*args, **kwargs)#
+

Version

+

Onnx name: Size

+

This version of the operator has been available since +version 13.

+

Summary

+

Takes a tensor as input and outputs an int64 scalar equal to the total number of elements of the input tensor.

+
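
Equivalently in NumPy (illustration only):

import numpy as np

x = np.zeros((2, 3, 4))
print(np.int64(x.size))  # 24, the total number of elements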

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • size (heterogeneous)T1: Total number of elements of the input tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • +
  • T1 tensor(int64): Constrain output to int64 tensor, which should be a scalar though.

  • +
+
+ +
+
+
+
+

OnnxSize_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSize_1(*args, **kwargs)#
+

Version

+

Onnx name: Size

+

This version of the operator has been available since +version 1.

+

Summary

+

Takes a tensor as input and outputs an int64 scalar equal to the total number of elements of the input tensor.

+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • size (heterogeneous)T1: Total number of elements of the input tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • +
  • T1 tensor(int64): Constrain output to int64 tensor, which should be a scalar though.

  • +
+
+ +
+
+
+
+

OnnxSize_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSize_13(*args, **kwargs)#
+

Version

+

Onnx name: Size

+

This version of the operator has been available since +version 13.

+

Summary

+

Takes a tensor as input and outputs an int64 scalar equal to the total number of elements of the input tensor.

+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • size (heterogeneous)T1: Total number of elements of the input tensor

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input tensor can be of arbitrary type.

  • +
  • T1 tensor(int64): Constrain output to int64 tensor, which should be a scalar though.

  • +
+
+ +
+
+
+
+

OnnxSlice#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSlice(*args, **kwargs)#
+

Version

+

Onnx name: Slice

+

This version of the operator has been available since +version 13.

+

Summary

+

Produces a slice of the input tensor along multiple axes. Similar to numpy: +https://numpy.org/doc/stable/user/basics.indexing.html?highlight=slice#slicing-and-striding

+

Slice uses the starts, ends, axes and steps inputs to select a sub-tensor +of its input data tensor.

+

An effective start[i], end[i], and step[i] must be computed for each i +in [0, … r-1] where r = rank(input) as follows:

+

If axes are omitted, they are set to [0, …, r-1]. +If steps are omitted, they are set to [1, …, 1] of length len(starts)

+

The effective values are initialized as start[i] = 0, end[i] = dims[i] where dims are the dimensions of input and step[i] = 1.

+

All negative elements of axes are made non-negative by adding r to them, where r = rank(input).

+

All negative values in starts[i] and ends[i] have dims[axes[i]] added to them, where dims are the dimensions of input. Then start[axes[i]] is the adjusted starts[i], clamped into the range [0, dims[axes[i]]] for positive stepping and [0, dims[axes[i]]-1] for negative stepping.

+

The clamping for the adjusted ends[i] depends on the sign of steps[i] and must +accommodate copying 0 through dims[axes[i]] elements, so for positive stepping +end[axes[i]] is clamped to [0, dims[axes[i]]], while for negative stepping it +is clamped to [-1, dims[axes[i]]-1].

+

Finally, step[axes[i]] = steps[i].

+

For slicing to the end of a dimension with unknown size, it is recommended to pass +in INT_MAX when slicing forward and ‘INT_MIN’ when slicing backward.

+

Example 1:

+
data = [
+    [1, 2, 3, 4],
+    [5, 6, 7, 8],
+]
+axes = [0, 1]
+starts = [1, 0]
+ends = [2, 3]
+steps = [1, 2]
+result = [
+    [5, 7],
+]
+
+
+

Example 2:

+
data = [
+    [1, 2, 3, 4],
+    [5, 6, 7, 8],
+]
+starts = [0, 1]
+ends = [-1, 1000]
+result = [
+    [2, 3, 4],
+]
+
+
+
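
Both examples can be reproduced with ordinary NumPy slicing; slice_op below is a hypothetical reference sketch of the semantics described above (not an skl2onnx API), and it does not attempt to cover every clamping corner case:

import numpy as np

def slice_op(data, starts, ends, axes=None, steps=None):
    # Build one Python slice object per sliced axis; NumPy performs the
    # start/end clamping described above for positive steps.
    r = data.ndim
    if axes is None:
        axes = list(range(len(starts)))
    if steps is None:
        steps = [1] * len(starts)
    slices = [slice(None)] * r
    for start, end, axis, step in zip(starts, ends, axes, steps):
        slices[axis % r] = slice(start, end, step)
    return data[tuple(slices)]

data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
print(slice_op(data, starts=[1, 0], ends=[2, 3], axes=[0, 1], steps=[1, 2]))
# [[5 7]]
print(slice_op(data, starts=[0, 1], ends=[-1, 1000]))
# [[2 3 4]]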

Inputs

+

Between 3 and 5 inputs.

+
    +
  • data (heterogeneous)T: Tensor of data to extract slices from.

  • +
  • starts (heterogeneous)Tind: 1-D tensor of starting indices of corresponding axis in axes

  • +
  • ends (heterogeneous)Tind: 1-D tensor of ending indices (exclusive) of corresponding axis in axes

  • +
  • axes (optional, heterogeneous)Tind: 1-D tensor of axes that starts and ends apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Behavior is undefined if an axis is repeated.

  • +
  • steps (optional, heterogeneous)Tind: 1-D tensor of slice step of corresponding axis in axes. Negative value means slicing backward. ‘steps’ cannot be 0. Defaults to 1s.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Sliced data tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxSlice_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSlice_1(*args, **kwargs)#
+

Version

+

Onnx name: Slice

+

This version of the operator has been available since +version 1.

+

Summary

+

Produces a slice of the input tensor along multiple axes. Similar to numpy: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html Slice uses the axes, starts and ends attributes to specify the start and end dimension for each axis in the list of axes; it uses this information to slice the input data tensor. If a negative value is passed for any of the start or end indices, it represents the number of elements before the end of that dimension. If the value passed to start or end is larger than n (the number of elements in this dimension), it represents n. For slicing to the end of a dimension with unknown size, it is recommended to pass in INT_MAX. If axes are omitted, they are set to [0, …, ndim-1]. Example 1:

+
+
+
data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
result = [
    [5, 6, 7],
]

Example 2:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
starts = [0, 1]
ends = [-1, 1000]
result = [
    [2, 3, 4],
]

+
+
+

Attributes

+
    +
  • +
  • +
  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensor of data to extract slices from.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Sliced data tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSlice_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSlice_10(*args, **kwargs)#
+

Version

+

Onnx name: Slice

+

This version of the operator has been available since +version 10.

+

Summary

+

Produces a slice of the input tensor along multiple axes. Similar to numpy: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html Slice uses the starts, ends, axes and steps inputs to specify the start and end dimension and step for each axis in the list of axes; it uses this information to slice the input data tensor. If a negative value is passed for any of the start or end indices, it represents the number of elements before the end of that dimension. If the value passed to start or end is larger than n (the number of elements in this dimension), it represents n. For slicing to the end of a dimension with unknown size, it is recommended to pass in INT_MAX. If a negative value is passed for step, it represents slicing backward. If axes are omitted, they are set to [0, …, ndim-1]. If steps are omitted, they are set to [1, …, 1] of length len(starts). Example 1:

+
+
+
data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
steps = [1, 2]
result = [
    [5, 7],
]

Example 2:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
starts = [0, 1]
ends = [-1, 1000]
result = [
    [2, 3, 4],
]

+
+
+

Inputs

+

Between 3 and 5 inputs.

+
    +
  • data (heterogeneous)T: Tensor of data to extract slices from.

  • +
  • starts (heterogeneous)Tind: 1-D tensor of starting indices of corresponding axis in axes

  • +
  • ends (heterogeneous)Tind: 1-D tensor of ending indices (exclusive) of corresponding axis in axes

  • +
  • axes (optional, heterogeneous)Tind: 1-D tensor of axes that starts and ends apply to.

  • +
  • steps (optional, heterogeneous)Tind: 1-D tensor of slice step of corresponding axis in axes. Default to 1.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Sliced data tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
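As a brief, hedged illustration of how this operator class is typically used from the algebra API (a sketch only; it assumes NumPy is available, and onnxruntime if the graph is to be executed):

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxSlice

# Build a one-node graph Y = Slice(X, starts, ends, axes, steps); from opset 10
# onward these values are passed as input tensors rather than attributes.
op = OnnxSlice(
    'X',
    np.array([1, 0], dtype=np.int64),   # starts
    np.array([2, 3], dtype=np.int64),   # ends
    np.array([0, 1], dtype=np.int64),   # axes
    np.array([1, 2], dtype=np.int64),   # steps
    op_version=10, output_names=['Y'])

x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.float32)
onx = op.to_onnx({'X': x})              # ONNX ModelProto

# To check the result ([[5., 7.]]) the model can be run with onnxruntime:
# import onnxruntime as rt
# sess = rt.InferenceSession(onx.SerializeToString())
# print(sess.run(None, {'X': x}))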
+
+
+
+

OnnxSlice_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSlice_11(*args, **kwargs)#
+

Version

+

Onnx name: Slice

+

This version of the operator has been available since +version 11.

+

Summary

+

Produces a slice of the input tensor along multiple axes. Similar to numpy: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses the starts, ends, axes and steps inputs to specify the start and end dimension and step for each axis in the list of axes; it uses this information to slice the input data tensor. If a negative value is passed for any of the start or end indices, it represents the number of elements before the end of that dimension. If the value passed to start or end is larger than n (the number of elements in this dimension), it represents n. For slicing to the end of a dimension with unknown size, it is recommended to pass in INT_MAX when slicing forward and INT_MIN when slicing backward. If a negative value is passed for step, it represents slicing backward; the step value cannot be 0. If axes are omitted, they are set to [0, …, ndim-1]. If steps are omitted, they are set to [1, …, 1] of length len(starts).
Example 1:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
steps = [1, 2]
result = [
    [5, 7],
]

Example 2:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
starts = [0, 1]
ends = [-1, 1000]
result = [
    [2, 3, 4],
]

Inputs

+

Between 3 and 5 inputs.

+
    +
  • data (heterogeneous)T: Tensor of data to extract slices from.

  • +
  • starts (heterogeneous)Tind: 1-D tensor of starting indices of corresponding axis in axes

  • +
  • ends (heterogeneous)Tind: 1-D tensor of ending indices (exclusive) of corresponding axis in axes

  • +
  • axes (optional, heterogeneous)Tind: 1-D tensor of axes that starts and ends apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).

  • +
  • steps (optional, heterogeneous)Tind: 1-D tensor of slice step of corresponding axis in axes. Negative value means slicing backward. ‘steps’ cannot be 0. Defaults to 1.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Sliced data tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxSlice_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSlice_13(*args, **kwargs)#
+

Version

+

Onnx name: Slice

+

This version of the operator has been available since +version 13.

+

Summary

+

Produces a slice of the input tensor along multiple axes. Similar to numpy: https://numpy.org/doc/stable/user/basics.indexing.html?highlight=slice#slicing-and-striding

Slice uses the starts, ends, axes and steps inputs to select a sub-tensor of its input data tensor.

An effective start[i], end[i], and step[i] must be computed for each i in [0, …, r-1] where r = rank(input) as follows:

If axes are omitted, they are set to [0, …, r-1]. If steps are omitted, they are set to [1, …, 1] of length len(starts).

The effective values are initialized as start[i] = 0, end[i] = dims[i] where dims are the dimensions of input, and step[i] = 1.

All negative elements of axes are made non-negative by adding r to them, where r = rank(input).

All negative values in starts[i] and ends[i] have dims[axes[i]] added to them, where dims are the dimensions of input. Then start[axes[i]] is the adjusted starts[i] clamped into the range [0, dims[axes[i]]] for positive stepping and [0, dims[axes[i]]-1] for negative stepping.

The clamping for the adjusted ends[i] depends on the sign of steps[i] and must accommodate copying 0 through dims[axes[i]] elements, so for positive stepping end[axes[i]] is clamped to [0, dims[axes[i]]], while for negative stepping it is clamped to [-1, dims[axes[i]]-1].

Finally, step[axes[i]] = steps[i].

For slicing to the end of a dimension with unknown size, it is recommended to pass in INT_MAX when slicing forward and INT_MIN when slicing backward.

+

Example 1:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
steps = [1, 2]
result = [
    [5, 7],
]

Example 2:

data = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
]
starts = [0, 1]
ends = [-1, 1000]
result = [
    [2, 3, 4],
]
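As a rough NumPy analogue of the two examples above (for intuition only; this is ordinary NumPy slicing, not an ONNX runtime call):

import numpy as np

data = np.array([[1, 2, 3, 4],
                 [5, 6, 7, 8]])

# Example 1: axes=[0, 1], starts=[1, 0], ends=[2, 3], steps=[1, 2]
result1 = data[1:2, 0:3:2]      # [[5, 7]]

# Example 2: starts=[0, 1], ends=[-1, 1000], axes and steps defaulted
result2 = data[0:-1, 1:1000]    # [[2, 3, 4]]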

Inputs

+

Between 3 and 5 inputs.

+
    +
  • data (heterogeneous)T: Tensor of data to extract slices from.

  • +
  • starts (heterogeneous)Tind: 1-D tensor of starting indices of corresponding axis in axes

  • +
  • ends (heterogeneous)Tind: 1-D tensor of ending indices (exclusive) of corresponding axis in axes

  • +
  • axes (optional, heterogeneous)Tind: 1-D tensor of axes that starts and ends apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data). Behavior is undefined if an axis is repeated.

  • +
  • steps (optional, heterogeneous)Tind: 1-D tensor of slice step of corresponding axis in axes. Negative value means slicing backward. ‘steps’ cannot be 0. Defaults to 1s.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Sliced data tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • Tind tensor(int32), tensor(int64): Constrain indices to integer types

  • +
+
+ +
+
+
+
+

OnnxSoftmax#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSoftmax(*args, **kwargs)#
+

Version

+

Onnx name: Softmax

+

This version of the operator has been available since +version 13.

+

Summary

+

The operator computes the normalized exponential values for the given input:

+
+

Softmax(input, axis) = Exp(input) / ReduceSum(Exp(input), axis=axis, keepdims=1)

+
+

The “axis” attribute indicates the dimension along which Softmax will be performed. The output tensor has the same shape and contains the Softmax values of the corresponding input.

+
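A minimal NumPy sketch of this formula (illustrative only; subtracting the maximum is a standard numerical-stability trick and does not change the result):

import numpy as np

def softmax(x, axis=-1):
    # Exp(input) / ReduceSum(Exp(input), axis=axis, keepdims=1)
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

x = np.array([[1.0, 2.0, 3.0]])
print(softmax(x))   # each slice along 'axis' sums to 1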

Attributes

+
    +
  • axis:

  • +
+

Describes the dimension Softmax will be performed on. +Negative value means counting dimensions +from the back. Accepted range is [-r, r-1] where r = rank(input).

+
+
+
Default value is

name: "axis" i: -1 type: INT

+
+
+
+

Inputs

+
    +
  • input (heterogeneous)T: The input tensor of rank >= axis.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The output values with the same shape as the input tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSoftmaxCrossEntropyLoss#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSoftmaxCrossEntropyLoss(*args, **kwargs)#
+

Version

+

Onnx name: SoftmaxCrossEntropyLoss

+

This version of the operator has been available since +version 13.

+

Summary

+

Loss function that measures the softmax cross entropy +between ‘scores’ and ‘labels’. +This operator first computes a loss tensor whose shape is identical to the labels input. +If the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, …, l_N). +If the input is N-D tensor with shape (N, C, D1, D2, …, Dk), +the loss tensor L may have (N, D1, D2, …, Dk) as its shape and L[i,][j_1][j_2]…[j_k] denotes a scalar element in L. +After L is available, this operator can optionally do a reduction operator.

+
    +
  • shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,…, Dk), +with K >= 1 in case of K-dimensional loss.

  • +
  • shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,…, Dk), +with K >= 1 in case of K-dimensional loss.

  • +
+

The loss for one sample, l_i, can be calculated as follows:

+
l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes.
+
+
+
+

or#

+
+

l[i][d1][d2]…[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if ‘weights’ is provided.

+
+

loss is zero for the case when label-value equals ignore_index.

+
l[i][d1][d2]...[dk]  = 0, when labels[n][d1][d2]...[dk] = ignore_index
+
+
+

where:

+
p = Softmax(scores)
+y = Log(p)
+c = labels[i][d1][d2]...[dk]
+
+
+

Finally, L is optionally reduced:

+
    +
  • If reduction = ‘none’, the output is L with shape (N, D1, D2, …, Dk).

  • +
  • If reduction = ‘sum’, the output is scalar: Sum(L).

  • +
  • If reduction = ‘mean’, the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W), +where tensor W is of shape (N, D1, D2, …, Dk) and W[n][d1][d2]…[dk] = weights[labels[i][d1][d2]…[dk]].

  • +
+
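A minimal NumPy sketch of the 2-D (N, C) case with no weights, following the formulas above (an illustration of the semantics, not the operator's implementation):

import numpy as np

def softmax_cross_entropy_loss(scores, labels, reduction='mean'):
    # scores: (N, C) floats, labels: (N,) integers in [0, C)
    e = np.exp(scores - scores.max(axis=1, keepdims=True))
    log_prob = np.log(e / e.sum(axis=1, keepdims=True))    # y = Log(Softmax(scores))
    loss = -log_prob[np.arange(scores.shape[0]), labels]   # l_i = -y[i][labels[i]]
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    return loss                                             # reduction == 'none'

scores = np.array([[2.0, 1.0, 0.1],
                   [0.5, 2.5, 0.3]])
labels = np.array([0, 1])
print(softmax_cross_entropy_loss(scores, labels))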

Attributes

+
    +
  • +
  • reduction: Type of reduction to apply to loss: none, sum, mean(default). ‘none’: no reduction will be applied, ‘sum’: the output will be summed. ‘mean’: the sum of the output will be divided by the number of elements in the output. Default value is +name: "reduction" s: "mean" type: STRING

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • scores (heterogeneous)T: The predicted outputs with shape [batch_size, class_size], or [batch_size, class_size, D1, D2 , …, Dk], where K is the number of dimensions.

  • +
  • labels (heterogeneous)Tind: The ground truth output tensor, with shape [batch_size], or [batch_size, D1, D2, …, Dk], where K is the number of dimensions. Labels element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the label values should either be in the range [0, C) or have the value ignore_index.

  • +
  • weights (optional, heterogeneous)T: A manual rescaling weight given to each class. If given, it has to be a 1D Tensor assigning weight to each of the classes. Otherwise, it is treated as if having all ones.

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • output (heterogeneous)T: Weighted loss float Tensor. If reduction is ‘none’, this has the shape of [batch_size], or [batch_size, D1, D2, …, Dk] in case of K-dimensional loss. Otherwise, it is a scalar.

  • +
  • log_prob (optional, heterogeneous)T: Log probability tensor. If the output of softmax is prob, its value is log(prob).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
  • Tind tensor(int32), tensor(int64): Constrain target to integer types

  • +
+
+
+ +
+
+
+
+

OnnxSoftmaxCrossEntropyLoss_12#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSoftmaxCrossEntropyLoss_12(*args, **kwargs)#
+

Version

+

Onnx name: SoftmaxCrossEntropyLoss

+

This version of the operator has been available since +version 12.

+

Summary

+

Loss function that measures the softmax cross entropy +between ‘scores’ and ‘labels’. +This operator first computes a loss tensor whose shape is identical to the labels input. +If the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, …, l_N). +If the input is N-D tensor with shape (N, C, D1, D2, …, Dk), +the loss tensor L may have (N, D1, D2, …, Dk) as its shape and L[i,][j_1][j_2]…[j_k] denotes a scalar element in L. +After L is available, this operator can optionally do a reduction operator.

+
+
  • shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2, …, Dk), with K >= 1 in case of K-dimensional loss.

  • shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2, …, Dk), with K >= 1 in case of K-dimensional loss.

The loss for one sample, l_i, can be calculated as follows:

l[i][d1][d2]...[dk] = -y[i][c][d1][d2]...[dk], where i is the index of classes.

or

l[i][d1][d2]...[dk] = -y[i][c][d1][d2]...[dk] * weights[c], if ‘weights’ is provided.

The loss is zero when the label value equals ignore_index:

l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index

where:

p = Softmax(scores)
y = Log(p)
c = labels[i][d1][d2]...[dk]

Finally, L is optionally reduced:

  • If reduction = ‘none’, the output is L with shape (N, D1, D2, …, Dk).

  • If reduction = ‘sum’, the output is scalar: Sum(L).

  • If reduction = ‘mean’, the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W), where tensor W is of shape (N, D1, D2, …, Dk) and W[n][d1][d2]…[dk] = weights[labels[i][d1][d2]…[dk]].

+

Attributes

+
    +
  • +
  • reduction: Type of reduction to apply to loss: none, sum, mean(default). ‘none’: no reduction will be applied, ‘sum’: the output will be summed. ‘mean’: the sum of the output will be divided by the number of elements in the output. Default value is +name: "reduction" s: "mean" type: STRING

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • scores (heterogeneous)T: The predicted outputs with shape [batch_size, class_size], or [batch_size, class_size, D1, D2 , …, Dk], where K is the number of dimensions.

  • +
  • labels (heterogeneous)Tind: The ground truth output tensor, with shape [batch_size], or [batch_size, D1, D2, …, Dk], where K is the number of dimensions. Labels element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the label values should either be in the range [0, C) or have the value ignore_index.

  • +
  • weights (optional, heterogeneous)T: A manual rescaling weight given to each class. If given, it has to be a 1D Tensor assigning weight to each of the classes. Otherwise, it is treated as if having all ones.

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • output (heterogeneous)T: Weighted loss float Tensor. If reduction is ‘none’, this has the shape of [batch_size], or [batch_size, D1, D2, …, Dk] in case of K-dimensional loss. Otherwise, it is a scalar.

  • +
  • log_prob (optional, heterogeneous)T: Log probability tensor. If the output of softmax is prob, its value is log(prob).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • Tind tensor(int32), tensor(int64): Constrain target to integer types

  • +
+
+ +
+
+
+
+

OnnxSoftmaxCrossEntropyLoss_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSoftmaxCrossEntropyLoss_13(*args, **kwargs)#
+

Version

+

Onnx name: SoftmaxCrossEntropyLoss

+

This version of the operator has been available since +version 13.

+

Summary

+

Loss function that measures the softmax cross entropy +between ‘scores’ and ‘labels’. +This operator first computes a loss tensor whose shape is identical to the labels input. +If the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, …, l_N). +If the input is N-D tensor with shape (N, C, D1, D2, …, Dk), +the loss tensor L may have (N, D1, D2, …, Dk) as its shape and L[i,][j_1][j_2]…[j_k] denotes a scalar element in L. +After L is available, this operator can optionally do a reduction operator.

+
    +
  • shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,…, Dk), +with K >= 1 in case of K-dimensional loss.

  • +
  • shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,…, Dk), +with K >= 1 in case of K-dimensional loss.

  • +
+

The loss for one sample, l_i, can be calculated as follows:

+
l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes.
+
+
+
+

or#

+
+

l[i][d1][d2]…[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if ‘weights’ is provided.

+
+

loss is zero for the case when label-value equals ignore_index.

+
l[i][d1][d2]...[dk]  = 0, when labels[n][d1][d2]...[dk] = ignore_index
+
+
+

where:

+
p = Softmax(scores)
+y = Log(p)
+c = labels[i][d1][d2]...[dk]
+
+
+

Finally, L is optionally reduced:

+
    +
  • If reduction = ‘none’, the output is L with shape (N, D1, D2, …, Dk).

  • +
  • If reduction = ‘sum’, the output is scalar: Sum(L).

  • +
  • If reduction = ‘mean’, the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W), +where tensor W is of shape (N, D1, D2, …, Dk) and W[n][d1][d2]…[dk] = weights[labels[i][d1][d2]…[dk]].

  • +
+

Attributes

+
    +
  • +
  • reduction: Type of reduction to apply to loss: none, sum, mean(default). ‘none’: no reduction will be applied, ‘sum’: the output will be summed. ‘mean’: the sum of the output will be divided by the number of elements in the output. Default value is +name: "reduction" s: "mean" type: STRING

  • +
+

Inputs

+

Between 2 and 3 inputs.

+
    +
  • scores (heterogeneous)T: The predicted outputs with shape [batch_size, class_size], or [batch_size, class_size, D1, D2 , …, Dk], where K is the number of dimensions.

  • +
  • labels (heterogeneous)Tind: The ground truth output tensor, with shape [batch_size], or [batch_size, D1, D2, …, Dk], where K is the number of dimensions. Labels element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the label values should either be in the range [0, C) or have the value ignore_index.

  • +
  • weights (optional, heterogeneous)T: A manual rescaling weight given to each class. If given, it has to be a 1D Tensor assigning weight to each of the classes. Otherwise, it is treated as if having all ones.

  • +
+

Outputs

+

Between 1 and 2 outputs.

+
    +
  • output (heterogeneous)T: Weighted loss float Tensor. If reduction is ‘none’, this has the shape of [batch_size], or [batch_size, D1, D2, …, Dk] in case of K-dimensional loss. Otherwise, it is a scalar.

  • +
  • log_prob (optional, heterogeneous)T: Log probability tensor. If the output of softmax is prob, its value is log(prob).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
  • Tind tensor(int32), tensor(int64): Constrain target to integer types

  • +
+
+
+ +
+
+
+
+

OnnxSoftmax_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSoftmax_1(*args, **kwargs)#
+

Version

+

Onnx name: Softmax

+

This version of the operator has been available since +version 1.

+

Summary

+
+
The operator computes the softmax (normalized exponential) values for each layer in the batch of the given input. The input is a 2-D tensor (Tensor<float>) of size (batch_size x input_feature_dimensions). The output tensor has the same shape and contains the softmax values of the corresponding input.

Input does not need to explicitly be a 2D vector; rather, it will be coerced into one. For an arbitrary n-dimensional tensor input in [a_0, a_1, …, a_{k-1}, a_k, …, a_{n-1}], where k is the axis provided, the input will be coerced into a 2-dimensional tensor with dimensions [a_0 * … * a_{k-1}, a_k * … * a_{n-1}]. For the default case where axis=1, this means the input tensor will be coerced into a 2D tensor of dimensions [a_0, a_1 * … * a_{n-1}], where a_0 is often the batch size. In this situation, we must have a_0 = N and a_1 * … * a_{n-1} = D. Each of these dimensions must be matched correctly, or else the operator will throw errors.

+
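To make the coercion concrete, the following NumPy sketch flattens an n-dimensional input to 2-D around the axis k and applies a row-wise softmax (a paraphrase of the text above, not the actual implementation):

import numpy as np

x = np.random.rand(2, 3, 4)        # arbitrary n-dimensional input
axis = 1                           # default axis

# coerce to 2-D: [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]
n = int(np.prod(x.shape[:axis]))   # 2
d = int(np.prod(x.shape[axis:]))   # 12
x2d = x.reshape(n, d)

e = np.exp(x2d - x2d.max(axis=1, keepdims=True))
y = (e / e.sum(axis=1, keepdims=True)).reshape(x.shape)   # same shape as the input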

Attributes

+
    +
  • axis: Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size Default value is +name: "axis" i: 1 type: INT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: The input tensor that’s coerced into a 2D matrix of size (NxD) as described above.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The output values with the same shape as input tensor (the original size without coercion).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSoftmax_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSoftmax_11(*args, **kwargs)#
+

Version

+

Onnx name: Softmax

+

This version of the operator has been available since +version 11.

+

Summary

+
+
The operator computes the softmax (normalized exponential) values for each layer in the batch of the given input.

The input does not need to explicitly be a 2D vector; rather, it will be coerced into one. For an arbitrary n-dimensional tensor input in [a_0, a_1, …, a_{k-1}, a_k, …, a_{n-1}], where k is the axis provided, the input will be coerced into a 2-dimensional tensor with dimensions [a_0 * … * a_{k-1}, a_k * … * a_{n-1}]. For the default case where axis=1, this means the input tensor will be coerced into a 2D tensor of dimensions [a_0, a_1 * … * a_{n-1}], where a_0 is often the batch size. In this situation, we must have a_0 = N and a_1 * … * a_{n-1} = D. Each of these dimensions must be matched correctly, or else the operator will throw errors. The output tensor has the same shape and contains the softmax values of the corresponding input.

+

Attributes

+
    +
  • axis: Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is +name: "axis" i: 1 type: INT

  • +
+

Inputs

+
    +
  • input (heterogeneous)T: The input tensor that’s coerced into a 2D matrix of size (NxD) as described above.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The output values with the same shape as input tensor (the original size without coercion).

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSoftmax_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSoftmax_13(*args, **kwargs)#
+

Version

+

Onnx name: Softmax

+

This version of the operator has been available since +version 13.

+

Summary

+

The operator computes the normalized exponential values for the given input:

+
+

Softmax(input, axis) = Exp(input) / ReduceSum(Exp(input), axis=axis, keepdims=1)

+
+

The “axis” attribute indicates the dimension along which Softmax will be performed. The output tensor has the same shape and contains the Softmax values of the corresponding input.

+

Attributes

+
    +
  • axis:

  • +
+

Describes the dimension Softmax will be performed on. +Negative value means counting dimensions +from the back. Accepted range is [-r, r-1] where r = rank(input).

+
+
+
Default value is

name: "axis" i: -1 type: INT

+
+
+
+

Inputs

+
    +
  • input (heterogeneous)T: The input tensor of rank >= axis.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The output values with the same shape as the input tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSoftplus#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSoftplus(*args, **kwargs)#
+

Version

+

Onnx name: Softplus

+

This version of the operator has been available since +version 1.

+

Summary

+

Softplus takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the softplus function, y = ln(exp(x) + 1), is applied to the tensor elementwise.

+
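As a one-line NumPy sketch of the formula y = ln(exp(x) + 1):

import numpy as np

x = np.array([-2.0, 0.0, 3.0], dtype=np.float32)
y = np.log(np.exp(x) + 1.0)        # softplus, applied elementwise
# np.logaddexp(0.0, x) computes the same value with better numerical behaviour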

Inputs

+
    +
  • X (heterogeneous)T: 1D input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: 1D input tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSoftplus_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSoftplus_1(*args, **kwargs)#
+

Version

+

Onnx name: Softplus

+

This version of the operator has been available since +version 1.

+

Summary

+

Softplus takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the softplus function, y = ln(exp(x) + 1), is applied to the tensor elementwise.

+

Inputs

+
    +
  • X (heterogeneous)T: 1D input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: 1D input tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSoftsign#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSoftsign(*args, **kwargs)#
+

Version

+

Onnx name: Softsign

+

This version of the operator has been available since +version 1.

+

Summary

+

Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise.

+
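As a NumPy sketch of the same element-wise formula:

import numpy as np

x = np.array([-3.0, 0.0, 2.0], dtype=np.float32)
y = x / (1.0 + np.abs(x))          # softsign, applied elementwise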

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The softsign (x/(1+|x|)) values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSoftsign_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSoftsign_1(*args, **kwargs)#
+

Version

+

Onnx name: Softsign

+

This version of the operator has been available since +version 1.

+

Summary

+

Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The softsign (x/(1+|x|)) values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSpaceToDepth#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSpaceToDepth(*args, **kwargs)#
+

Version

+

Onnx name: SpaceToDepth

+

This version of the operator has been available since +version 13.

+

Summary

+

SpaceToDepth rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the depth dimension.

+
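A NumPy sketch of this rearrangement for a blocksize of 2; the reshape/transpose formulation below is one common way to express it and is given for intuition only:

import numpy as np

def space_to_depth(x, blocksize):
    # x has shape [N, C, H, W]; H and W must be divisible by blocksize
    n, c, h, w = x.shape
    b = blocksize
    tmp = x.reshape(n, c, h // b, b, w // b, b)
    tmp = tmp.transpose(0, 3, 5, 1, 2, 4)
    return tmp.reshape(n, c * b * b, h // b, w // b)

x = np.arange(16).reshape(1, 1, 4, 4)
print(space_to_depth(x, 2).shape)   # (1, 4, 2, 2)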

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of [N, C * blocksize * blocksize, H/blocksize, W/blocksize].

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSpaceToDepth_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSpaceToDepth_1(*args, **kwargs)#
+

Version

+

Onnx name: SpaceToDepth

+

This version of the operator has been available since +version 1.

+

Summary

+

SpaceToDepth rearranges blocks of spatial data into depth. More specifically, +this op outputs a copy of the input tensor where values from the height and width dimensions +are moved to the depth dimension.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of [N, C * blocksize * blocksize, H/blocksize, W/blocksize].

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSpaceToDepth_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSpaceToDepth_13(*args, **kwargs)#
+

Version

+

Onnx name: SpaceToDepth

+

This version of the operator has been available since +version 13.

+

Summary

+

SpaceToDepth rearranges blocks of spatial data into depth. More specifically, +this op outputs a copy of the input tensor where values from the height and width dimensions +are moved to the depth dimension.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of [N, C * blocksize * blocksize, H/blocksize, W/blocksize].

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSplit#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSplit(*args, **kwargs)#
+

Version

+

Onnx name: Split

+

This version of the operator has been available since +version 18.

+

Summary

+

Split a tensor into a list of tensors, along the specified ‘axis’. Either input ‘split’ or the attribute ‘num_outputs’ should be specified, but not both. If the attribute ‘num_outputs’ is specified, then the tensor is split into equal sized parts. If the tensor is not evenly splittable into num_outputs, the last chunk will be smaller. If the input ‘split’ is specified, it indicates the sizes of each output in the split.

+
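A NumPy sketch of the two modes described above (this mirrors the resulting chunk sizes only, not the operator itself):

import numpy as np

x = np.arange(10)

# num_outputs=3: equal-sized chunks of ceil(10/3) = 4, the last chunk is smaller
chunk = -(-x.shape[0] // 3)
parts = np.split(x, range(chunk, x.shape[0], chunk))     # sizes 4, 4, 2

# split=[2, 3, 5]: explicit sizes whose sum equals the dimension on 'axis'
sizes = [2, 3, 5]
parts2 = np.split(x, np.cumsum(sizes)[:-1])              # sizes 2, 3, 5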

Attributes

+
    +
  • axis: Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1] where r = rank(input). Default value is +name: "axis" i: 0 type: INT

  • +
  • +
+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • input (heterogeneous)T: The tensor to split

  • +
  • split (optional, heterogeneous)tensor(int64): Optional length of each output. Values should be >= 0. The sum of the values must be equal to the dim value at ‘axis’ specified.

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic, heterogeneous)T: One or more outputs forming list of tensors after splitting

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSplitToSequence#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSplitToSequence(*args, **kwargs)#
+

Version

+

Onnx name: SplitToSequence

+

This version of the operator has been available since +version 11.

+

Summary

+

Split a tensor into a sequence of tensors, along the specified ‘axis’. Lengths of the parts can be specified using the optional argument ‘split’. If the argument ‘split’ is not specified, a default scalar value of 1 is used as the value of ‘split’. ‘split’ must contain only positive numbers. ‘split’ is either a scalar (tensor of empty shape), or a 1-D tensor. If ‘split’ is a scalar, then ‘input’ will be split into chunks all of size ‘split’ if possible. The last chunk alone may be smaller than ‘split’ if the ‘input’ size along the given axis ‘axis’ is not divisible by ‘split’. If ‘split’ is a 1-dimensional tensor, the input tensor is split into ‘size(split)’ chunks, with lengths of the parts on ‘axis’ specified in ‘split’. In this scenario, the sum of entries in ‘split’ must be equal to the dimension size of the input tensor on ‘axis’.

+
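A NumPy sketch of the two forms of ‘split’ (scalar chunk size versus 1-D sizes); ONNX returns a sequence of tensors, approximated here by a Python list:

import numpy as np

x = np.arange(7)

# scalar split=3: chunks of size 3, only the last one may be smaller
scalar = 3
seq1 = np.split(x, range(scalar, x.shape[0], scalar))    # sizes 3, 3, 1

# 1-D split=[2, 5]: sizes must sum to the dimension on 'axis'
sizes = np.array([2, 5])
seq2 = np.split(x, np.cumsum(sizes)[:-1])                # sizes 2, 5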

Attributes

+
    +
  • axis: Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1]. Default value is +name: "axis" i: 0 type: INT

  • +
  • keepdims: Keep the split dimension or not. Default 1, which means we keep split dimension. If input ‘split’ is specified, this attribute is ignored. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • input (heterogeneous)T: The tensor to split

  • +
  • split (optional, heterogeneous)I: Length of each output. It can be either a scalar(tensor of empty shape), or a 1-D tensor. All values must be >= 0.

  • +
+

Outputs

+
    +
  • output_sequence (heterogeneous)S: One or more outputs forming a sequence of tensors after splitting

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input types to all tensor types.

  • +
  • I tensor(int32), tensor(int64): Constrain split size to integral tensor.

  • +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSplitToSequence_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSplitToSequence_11(*args, **kwargs)#
+

Version

+

Onnx name: SplitToSequence

+

This version of the operator has been available since +version 11.

+

Summary

+

Split a tensor into a sequence of tensors, along the specified ‘axis’. +Lengths of the parts can be specified using the optional argument ‘split’. +If the argument `split’ is not specified, a default scalar value of 1 +is used as the value of `split’. +‘split’ must contain only positive numbers. +‘split’ is either a scalar (tensor of empty shape), or a 1-D tensor. +If ‘split’ is a scalar, then ‘input’ will be split into chunks all of size ‘split’ +if possible. The last chunk alone may be smaller than ‘split’ if the ‘input’ size +along the given axis ‘axis’ is not divisible by ‘split’. +If ‘split’ is a 1-dimensional tensor, the input tensor is split into ‘size(split)’ chunks, +with lengths of the parts on ‘axis’ specified in ‘split’. In this scenario, the sum of entries +in ‘split’ must be equal to the dimension size of input tensor on ‘axis’.

+

Attributes

+
    +
  • axis: Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1]. Default value is +name: "axis" i: 0 type: INT

  • +
  • keepdims: Keep the split dimension or not. Default 1, which means we keep split dimension. If input ‘split’ is specified, this attribute is ignored. Default value is +name: "keepdims" i: 1 type: INT

  • +
+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • input (heterogeneous)T: The tensor to split

  • +
  • split (optional, heterogeneous)I: Length of each output. It can be either a scalar(tensor of empty shape), or a 1-D tensor. All values must be >= 0.

  • +
+

Outputs

+
    +
  • output_sequence (heterogeneous)S: One or more outputs forming a sequence of tensors after splitting

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input types to all tensor types.

  • +
  • I tensor(int32), tensor(int64): Constrain split size to integral tensor.

  • +
  • S seq(tensor(uint8)), seq(tensor(uint16)), seq(tensor(uint32)), seq(tensor(uint64)), seq(tensor(int8)), seq(tensor(int16)), seq(tensor(int32)), seq(tensor(int64)), seq(tensor(float16)), seq(tensor(float)), seq(tensor(double)), seq(tensor(string)), seq(tensor(bool)), seq(tensor(complex64)), seq(tensor(complex128)): Constrain output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSplit_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSplit_1(*args, **kwargs)#
+

Version

+

Onnx name: Split

+

This version of the operator has been available since +version 1.

+

Summary

+

Split a tensor into a list of tensors, along the specified ‘axis’. The lengths of the split can be specified using the ‘split’ attribute or the optional second input blob to the operator. Otherwise, the tensor is split into equal sized parts.

+

Attributes

+
    +
  • +
  • +
+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • input (heterogeneous)T: The tensor to split

  • +
  • split (optional, heterogeneous)T: Optional list of output lengths (see also arg ‘split’)

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs… (variadic, heterogeneous)T: One or more outputs forming list of tensors after splitting

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSplit_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSplit_11(*args, **kwargs)#
+

Version

+

Onnx name: Split

+

This version of the operator has been available since +version 11.

+

Summary

+

Split a tensor into a list of tensors, along the specified +‘axis’. Lengths of the parts can be specified using argument ‘split’. +Otherwise, the tensor is split to equal sized parts.

+

Attributes

+
    +
  • axis: Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1] where r = rank(input). Default value is +name: "axis" i: 0 type: INT

  • +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: The tensor to split

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic, heterogeneous)T: One or more outputs forming list of tensors after splitting

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSplit_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSplit_13(*args, **kwargs)#
+

Version

+

Onnx name: Split

+

This version of the operator has been available since +version 13.

+

Summary

+

Split a tensor into a list of tensors, along the specified +‘axis’. Lengths of the parts can be specified using input ‘split’. +Otherwise, the tensor is split to equal sized parts.

+

Attributes

+
    +
  • axis: Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1] where r = rank(input). Default value is +name: "axis" i: 0 type: INT

  • +
+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • input (heterogeneous)T: The tensor to split

  • +
  • split (optional, heterogeneous)tensor(int64): Optional length of each output. Values should be >= 0. The sum of the values must be equal to the dim value at ‘axis’ specified.

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic, heterogeneous)T: One or more outputs forming list of tensors after splitting

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSplit_18#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSplit_18(*args, **kwargs)#
+

Version

+

Onnx name: Split

+

This version of the operator has been available since +version 18.

+

Summary

+

Split a tensor into a list of tensors, along the specified ‘axis’. +Either input ‘split’ or the attribute ‘num_outputs’ should be specified, but not both. +If the attribute ‘num_outputs’ is specified, then the tensor is split into equal sized parts. +If the tensor is not evenly splittable into num_outputs, the last chunk will be smaller. +If the input ‘split’ is specified, it indicates the sizes of each output in the split.

+

Attributes

+
    +
  • axis: Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1] where r = rank(input). Default value is +name: "axis" i: 0 type: INT

  • +
  • +
+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • input (heterogeneous)T: The tensor to split

  • +
  • split (optional, heterogeneous)tensor(int64): Optional length of each output. Values should be >= 0. The sum of the values must be equal to the dim value at ‘axis’ specified.

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic, heterogeneous)T: One or more outputs forming list of tensors after splitting

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSplit_2#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSplit_2(*args, **kwargs)#
+

Version

+

Onnx name: Split

+

This version of the operator has been available since +version 2.

+

Summary

+

Split a tensor into a list of tensors, along the specified +‘axis’. Lengths of the parts can be specified using argument ‘split’. +Otherwise, the tensor is split to equal sized parts.

+

Attributes

+
    +
  • axis: Which axis to split on. Default value is +name: "axis" i: 0 type: INT

  • +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: The tensor to split

  • +
+

Outputs

+

Between 1 and 2147483647 outputs.

+
    +
  • outputs (variadic, heterogeneous)T: One or more outputs forming list of tensors after splitting

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSqrt#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSqrt(*args, **kwargs)#
+

Version

+

Onnx name: Sqrt

+

This version of the operator has been available since +version 13.

+

Summary

+

Square root takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the square root, y = x^0.5, is applied to the tensor elementwise. If x is negative, then it will return NaN.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSqrt_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSqrt_1(*args, **kwargs)#
+

Version

+

Onnx name: Sqrt

+

This version of the operator has been available since +version 1.

+

Summary

+

Square root takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the square root, y = x^0.5, is applied to the tensor elementwise. If x is negative, then it will return NaN.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSqrt_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSqrt_13(*args, **kwargs)#
+

Version

+

Onnx name: Sqrt

+

This version of the operator has been available since +version 13.

+

Summary

+

Square root takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the square root, y = x^0.5, is applied to the tensor elementwise. If x is negative, then it will return NaN.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSqrt_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSqrt_6(*args, **kwargs)#
+

Version

+

Onnx name: Sqrt

+

This version of the operator has been available since +version 6.

+

Summary

+

Square root takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the square root, y = x^0.5, is applied to the tensor elementwise. If x is negative, then it will return NaN.

+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSqueeze#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSqueeze(*args, **kwargs)#
+

Version

+

Onnx name: Squeeze

+

This version of the operator has been available since +version 13.

+

Summary

+

Remove single-dimensional entries from the shape of a tensor. Takes an input axes with a list of axes to squeeze. If axes is not provided, all the single dimensions will be removed from the shape. If an axis is selected with shape entry not equal to one, an error is raised.

+
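A NumPy sketch of the same semantics (axes is optional):

import numpy as np

x = np.zeros((1, 3, 1, 5))

y_all = np.squeeze(x)            # axes omitted: all size-1 dimensions removed -> (3, 5)
y_one = np.squeeze(x, axis=2)    # axes=[2]: only that dimension removed -> (1, 3, 5)
# like the operator, np.squeeze raises an error if the selected axis is not of size 1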

Inputs

+

Between 1 and 2 inputs.

+
    +
  • data (heterogeneous)T: Tensors with at least max(dims) dimensions.

  • +
  • axes (optional, heterogeneous)tensor(int64): List of integers indicating the dimensions to squeeze. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).

  • +
+

Outputs

+
    +
  • squeezed (heterogeneous)T: Reshaped tensor with same data as input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSqueeze_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSqueeze_1(*args, **kwargs)#
+

Version

+

Onnx name: Squeeze

+

This version of the operator has been available since +version 1.

+

Summary

+

Remove single-dimensional entries from the shape of a tensor. +Takes a parameter axes with a list of axes to squeeze. +If axes is not provided, all the single dimensions will be removed from +the shape. If an axis is selected with shape entry not equal to one, an error is raised.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensors with at least max(dims) dimensions.

  • +
+

Outputs

+
    +
  • squeezed (heterogeneous)T: Reshaped tensor with same data as input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSqueeze_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSqueeze_11(*args, **kwargs)#
+

Version

+

Onnx name: Squeeze

+

This version of the operator has been available since +version 11.

+

Summary

+

Remove single-dimensional entries from the shape of a tensor. +Takes a parameter axes with a list of axes to squeeze. +If axes is not provided, all the single dimensions will be removed from +the shape. If an axis is selected with shape entry not equal to one, an error is raised.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Tensors with at least max(dims) dimensions.

  • +
+

Outputs

+
    +
  • squeezed (heterogeneous)T: Reshaped tensor with same data as input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxSqueeze_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSqueeze_13(*args, **kwargs)#
+

Version

+

Onnx name: Squeeze

+

This version of the operator has been available since +version 13.

+

Summary

+

Remove single-dimensional entries from the shape of a tensor. +Takes an input axes with a list of axes to squeeze. +If axes is not provided, all the single dimensions will be removed from +the shape. If an axis is selected with shape entry not equal to one, an error is raised.

+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • data (heterogeneous)T: Tensors with at least max(dims) dimensions.

  • +
  • axes (optional, heterogeneous)tensor(int64): List of integers indicating the dimensions to squeeze. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).

  • +
+

Outputs

+
    +
  • squeezed (heterogeneous)T: Reshaped tensor with same data as input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxStringNormalizer#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxStringNormalizer(*args, **kwargs)#
+

Version

+

Onnx name: StringNormalizer

+

This version of the operator has been available since +version 10.

+

Summary

+

StringNormalization performs string operations for basic cleaning. This operator has only one input (denoted by X) and only one output (denoted by Y). This operator first examines the elements in X and removes elements specified in the “stopwords” attribute. After removing stop words, the intermediate result can be further lowercased, uppercased, or just returned, depending on the “case_change_action” attribute. This operator only accepts [C]- and [1, C]-tensors. If all elements in X are dropped, the output will be the empty value of a string tensor with shape [1] if the input shape is [C], and shape [1, 1] if the input shape is [1, C].

+
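A rough Python sketch of the described cleaning steps, assuming case-insensitive stopword matching (is_case_sensitive=0) and case_change_action="LOWER"; it only illustrates the behaviour described above, not the operator's implementation:

def string_normalize(values, stopwords, case_change_action="LOWER"):
    # drop stop words (case-insensitive comparison here)
    kept = [v for v in values if v.lower() not in stopwords]
    if not kept:
        kept = [""]                       # all elements dropped -> string tensor of shape [1]
    if case_change_action == "LOWER":
        kept = [v.lower() for v in kept]
    elif case_change_action == "UPPER":
        kept = [v.upper() for v in kept]
    return kept

print(string_normalize(["The", "Quick", "AND", "Brown"], {"the", "and"}))
# ['quick', 'brown']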

Attributes

+
    +
  • case_change_action: string enum that cases output to be lowercased/uppercases/unchanged. Valid values are “LOWER”, “UPPER”, “NONE”. Default is “NONE” Default value is +name: "case_change_action" s: "NONE" type: STRING

  • +
  • is_case_sensitive: Boolean. Whether the identification of stop words in X is case-sensitive. Default is false Default value is +name: "is_case_sensitive" i: 0 type: INT

  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)tensor(string): UTF-8 strings to normalize

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(string): UTF-8 Normalized strings

  • +
+
+ +
+
+
+
+

OnnxStringNormalizer_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxStringNormalizer_10(*args, **kwargs)#
+

Version

+

Onnx name: StringNormalizer

+

This version of the operator has been available since +version 10.

+

Summary

+

StringNormalization performs string operations for basic cleaning. This operator has only one input (denoted by X) and only one output (denoted by Y). This operator first examines the elements in X and removes elements specified in the “stopwords” attribute. After removing stop words, the intermediate result can be further lowercased, uppercased, or just returned, depending on the “case_change_action” attribute. This operator only accepts [C]- and [1, C]-tensors. If all elements in X are dropped, the output will be the empty value of a string tensor with shape [1] if the input shape is [C], and shape [1, 1] if the input shape is [1, C].

+

Attributes

+
    +
  • case_change_action: string enum that cases output to be lowercased/uppercases/unchanged. Valid values are “LOWER”, “UPPER”, “NONE”. Default is “NONE” Default value is +name: "case_change_action" s: "NONE" type: STRING

  • +
  • is_case_sensitive: Boolean. Whether the identification of stop words in X is case-sensitive. Default is false Default value is +name: "is_case_sensitive" i: 0 type: INT

  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)tensor(string): UTF-8 strings to normalize

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(string): UTF-8 Normalized strings

  • +
+
+ +
+
+
+
+

OnnxSub#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSub(*args, **kwargs)#
+

Version

+

Onnx name: Sub

+

This version of the operator has been available since +version 14.

+

Summary

+

Performs element-wise binary subtraction (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxSub_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSub_1(*args, **kwargs)#
+

Version

+

Onnx name: Sub

+

This version of the operator has been available since +version 1.

+

Summary

+

Performs element-wise binary subtraction (with limited broadcast support).

+

If necessary the right-hand-side argument will be broadcasted to match the +shape of left-hand-side argument. When broadcasting is specified, the second +tensor can either be of element size 1 (including a scalar tensor and any +tensor with rank equal to or smaller than the first tensor), or having its +shape as a contiguous subset of the first tensor’s shape. The starting of the +mutually equal shape is specified by the argument “axis”, and if it is not set, +suffix matching is assumed. 1-dim expansion doesn’t work yet.

+

For example, the following tensor shapes are supported (with broadcast=1):

+
+

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor +shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor +shape(A) = (2, 3, 4, 5), shape(B) = (5,) +shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) +shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 +shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

+
+

Attribute broadcast=1 needs to be passed to enable broadcasting.

+

Attributes

+
    +
  • +
  • broadcast: Pass 1 to enable broadcasting Default value is +name: "broadcast" i: 0 type: INT

  • +
  • +
+

Inputs

+
    +
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • +
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSub_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSub_13(*args, **kwargs)#
+

Version

+

Onnx name: Sub

+

This version of the operator has been available since +version 13.

+

Summary

+

Performs element-wise binary subtraction (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxSub_14#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSub_14(*args, **kwargs)#
+

Version

+

Onnx name: Sub

+

This version of the operator has been available since +version 14.

+

Summary

+

Performs element-wise binary subtraction (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

(Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to all numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxSub_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSub_6(*args, **kwargs)#
+

Version

+

Onnx name: Sub

+

This version of the operator has been available since +version 6.

+

Summary

+

Performs element-wise binary subtraction (with limited broadcast support).

+

If necessary the right-hand-side argument will be broadcast to match the shape of the left-hand-side argument. When broadcasting is specified, the second tensor can either be of element size 1 (including a scalar tensor and any tensor with rank equal to or smaller than the first tensor), or have its shape as a contiguous subset of the first tensor’s shape. The start of the mutually equal shape is specified by the argument “axis”; if it is not set, suffix matching is assumed. 1-dim expansion doesn’t work yet.

+

For example, the following tensor shapes are supported (with broadcast=1):

+
+

shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor
shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is a 1-element tensor
shape(A) = (2, 3, 4, 5), shape(B) = (5,)
shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0

+
+

Attribute broadcast=1 needs to be passed to enable broadcasting.

+

Attributes

+
    +
  • +
  • broadcast: Pass 1 to enable broadcasting Default value is +name: "broadcast" i: 0 type: INT

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: First operand, should share the type with the second operand.

  • +
  • B (heterogeneous)T: Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same dimensions and type as A

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxSub_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSub_7(*args, **kwargs)#
+

Version

+

Onnx name: Sub

+

This version of the operator has been available since +version 7.

+

Summary

+

Performs element-wise binary subtraction (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First operand.

  • +
  • B (heterogeneous)T: Second operand.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T: Result, has same element type as two inputs

  • +
+

Type Constraints

+
    +
  • T tensor(uint32), tensor(uint64), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to high-precision numeric tensors.

  • +
+
+ +
+
+
+
+

OnnxSum#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSum(*args, **kwargs)#
+

Version

+

Onnx name: Sum

+

This version of the operator has been available since +version 13.

+

Summary

+

Element-wise sum of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.
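
A minimal sketch of a variadic Sum over one graph input and two broadcastable constants, using the same algebra pattern (names, shapes and opset are arbitrary choices for the example):

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxSum
from skl2onnx.common.data_types import FloatTensorType

# Sum accepts any number of inputs; all of them must share the element type.
op = OnnxSum('X',
             np.array([[1., 2.]], dtype=np.float32),
             np.array([[10., 20.]], dtype=np.float32),
             output_names=['Y'], op_version=13)
onx = op.to_onnx({'X': FloatTensorType([None, 2])}, target_opset=13)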

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for sum.

  • +
+

Outputs

+
    +
  • sum (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSum_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSum_1(*args, **kwargs)#
+

Version

+

Onnx name: Sum

+

This version of the operator has been available since +version 1.

+

Summary

+

Element-wise sum of each of the input tensors. All inputs and outputs must +have the same shape and data type.

+

Attributes

+
    +
  • +
+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for Sum.

  • +
+

Outputs

+
    +
  • sum (heterogeneous)T: Output tensor. Same dimension as inputs.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSum_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSum_13(*args, **kwargs)#
+

Version

+

Onnx name: Sum

+

This version of the operator has been available since +version 13.

+

Summary

+

Element-wise sum of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for sum.

  • +
+

Outputs

+
    +
  • sum (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSum_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSum_6(*args, **kwargs)#
+

Version

+

Onnx name: Sum

+

This version of the operator has been available since +version 6.

+

Summary

+

Element-wise sum of each of the input tensors. All inputs and outputs must +have the same shape and data type.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for Sum.

  • +
+

Outputs

+
    +
  • sum (heterogeneous)T: Output tensor. Same dimension as inputs.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxSum_8#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxSum_8(*args, **kwargs)#
+

Version

+

Onnx name: Sum

+

This version of the operator has been available since +version 8.

+

Summary

+

Element-wise sum of each of the input tensors (with Numpy-style broadcasting support). +All inputs and outputs must have the same data type. +This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+

Between 1 and 2147483647 inputs.

+
    +
  • data_0 (variadic, heterogeneous)T: List of tensors for sum.

  • +
+

Outputs

+
    +
  • sum (heterogeneous)T: Output tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxTan#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTan(*args, **kwargs)#
+

Version

+

Onnx name: Tan

+

This version of the operator has been available since +version 7.

+

Summary

+

Calculates the tangent of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The tangent of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxTan_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTan_7(*args, **kwargs)#
+

Version

+

Onnx name: Tan

+

This version of the operator has been available since +version 7.

+

Summary

+

Calculates the tangent of the given input tensor, element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The tangent of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxTanh#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTanh(*args, **kwargs)#
+

Version

+

Onnx name: Tanh

+

This version of the operator has been available since +version 13.

+

Summary

+

Calculates the hyperbolic tangent of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic tangent values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxTanh_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTanh_1(*args, **kwargs)#
+

Version

+

Onnx name: Tanh

+

This version of the operator has been available since +version 1.

+

Summary

+

Calculates the hyperbolic tangent of the given input tensor element-wise.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • input (heterogeneous)T: 1-D input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic tangent values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxTanh_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTanh_13(*args, **kwargs)#
+

Version

+

Onnx name: Tanh

+

This version of the operator has been available since +version 13.

+

Summary

+

Calculates the hyperbolic tangent of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic tangent values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double), tensor(bfloat16): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxTanh_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTanh_6(*args, **kwargs)#
+

Version

+

Onnx name: Tanh

+

This version of the operator has been available since +version 6.

+

Summary

+

Calculates the hyperbolic tangent of the given input tensor element-wise.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: The hyperbolic tangent values of the input tensor computed element-wise

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxTfIdfVectorizer#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTfIdfVectorizer(*args, **kwargs)#
+

Version

+

Onnx name: TfIdfVectorizer

+

This version of the operator has been available since +version 9.

+

Summary

+

This transform extracts n-grams from the input sequence and saves them as a vector. Input can be either a 1-D or 2-D tensor. For 1-D input, the output is the n-gram representation of that input. For 2-D input, the output is also a 2-D tensor whose i-th row is the n-gram representation of the i-th input row. More specifically, if the input shape is [C], the corresponding output shape would be [max(ngram_indexes) + 1]. If the input shape is [N, C], this operator produces a [N, max(ngram_indexes) + 1] tensor.

+

In contrast to standard n-gram extraction, here the indexes used to extract an n-gram from the original sequence are not necessarily consecutive numbers. The discontinuity between indexes is controlled by the number of skips. If the number of skips is 2, we should skip two tokens when scanning through the original sequence. Consider an example: assume the input sequence is [94, 17, 36, 12, 28] and the number of skips is 2. The associated 2-grams are [94, 12] and [17, 28], indexed by [0, 3] and [1, 4] respectively. If the number of skips becomes 0, the 2-grams generated are [94, 17], [17, 36], [36, 12], [12, 28], indexed by [0, 1], [1, 2], [2, 3], [3, 4] respectively.

+

The output vector (denoted by Y) stores the count of each n-gram; +Y[ngram_indexes[i]] indicates the times that the i-th n-gram is found. The attribute ngram_indexes is used to determine the mapping +between index i and the corresponding n-gram’s output coordinate. If pool_int64s is [94, 17, 17, 36], ngram_indexes is [1, 0], +ngram_counts=[0, 0], then the Y[0] (first element in Y) and Y[1] (second element in Y) are the counts of [17, 36] and [94, 17], +respectively. An n-gram which cannot be found in pool_strings/pool_int64s should be ignored and has no effect on the output. +Note that we may consider all skips up to S when generating the n-grams.

+

The examples used above hold if mode is “TF”. If mode is “IDF”, all counts larger than 1 are truncated to 1 and the i-th element in weights is used to scale (by multiplication) the count of the i-th n-gram in the pool. If mode is “TFIDF”, this operator first computes the counts of all n-grams and then scales them by the associated values in the weights attribute.

+

Only one of pool_strings and pool_int64s can be set. If pool_int64s is set, the input should be an integer tensor. +If pool_strings is set, the input must be a string tensor.
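
The skip rule can be illustrated with a few lines of plain Python (a standalone sketch of the indexing logic only, not the operator implementation; the operator itself aggregates every skip value from 0 up to the configured maximum):

def two_grams_with_skips(seq, skips):
    # 2-grams whose two items are exactly `skips` tokens apart,
    # matching the indexing described above.
    return [(seq[i], seq[i + skips + 1])
            for i in range(len(seq) - skips - 1)]

print(two_grams_with_skips([94, 17, 36, 12, 28], 2))
# [(94, 12), (17, 28)]
print(two_grams_with_skips([94, 17, 36, 12, 28], 0))
# [(94, 17), (17, 36), (36, 12), (12, 28)]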

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input for n-gram extraction

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T1: Ngram results

  • +
+

Type Constraints

+
    +
  • T tensor(string), tensor(int32), tensor(int64): Input is either UTF-8 strings or int32/int64

  • +
  • T1 tensor(float): 1-D tensor of floats

  • +
+
+ +
+
+
+
+

OnnxTfIdfVectorizer_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTfIdfVectorizer_9(*args, **kwargs)#
+

Version

+

Onnx name: TfIdfVectorizer

+

This version of the operator has been available since +version 9.

+

Summary

+

This transform extracts n-grams from the input sequence and saves them as a vector. Input can be either a 1-D or 2-D tensor. For 1-D input, the output is the n-gram representation of that input. For 2-D input, the output is also a 2-D tensor whose i-th row is the n-gram representation of the i-th input row. More specifically, if the input shape is [C], the corresponding output shape would be [max(ngram_indexes) + 1]. If the input shape is [N, C], this operator produces a [N, max(ngram_indexes) + 1] tensor.

+

In contrast to standard n-gram extraction, here the indexes used to extract an n-gram from the original sequence are not necessarily consecutive numbers. The discontinuity between indexes is controlled by the number of skips. If the number of skips is 2, we should skip two tokens when scanning through the original sequence. Consider an example: assume the input sequence is [94, 17, 36, 12, 28] and the number of skips is 2. The associated 2-grams are [94, 12] and [17, 28], indexed by [0, 3] and [1, 4] respectively. If the number of skips becomes 0, the 2-grams generated are [94, 17], [17, 36], [36, 12], [12, 28], indexed by [0, 1], [1, 2], [2, 3], [3, 4] respectively.

+

The output vector (denoted by Y) stores the count of each n-gram; +Y[ngram_indexes[i]] indicates the times that the i-th n-gram is found. The attribute ngram_indexes is used to determine the mapping +between index i and the corresponding n-gram’s output coordinate. If pool_int64s is [94, 17, 17, 36], ngram_indexes is [1, 0], +ngram_counts=[0, 0], then the Y[0] (first element in Y) and Y[1] (second element in Y) are the counts of [17, 36] and [94, 17], +respectively. An n-gram which cannot be found in pool_strings/pool_int64s should be ignored and has no effect on the output. +Note that we may consider all skips up to S when generating the n-grams.

+

The examples used above hold if mode is “TF”. If mode is “IDF”, all counts larger than 1 are truncated to 1 and the i-th element in weights is used to scale (by multiplication) the count of the i-th n-gram in the pool. If mode is “TFIDF”, this operator first computes the counts of all n-grams and then scales them by the associated values in the weights attribute.

+

Only one of pool_strings and pool_int64s can be set. If pool_int64s is set, the input should be an integer tensor. +If pool_strings is set, the input must be a string tensor.

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input for n-gram extraction

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T1: Ngram results

  • +
+

Type Constraints

+
    +
  • T tensor(string), tensor(int32), tensor(int64): Input is either UTF-8 strings or int32/int64

  • +
  • T1 tensor(float): 1-D tensor of floats

  • +
+
+ +
+
+
+
+

OnnxThresholdedRelu#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxThresholdedRelu(*args, **kwargs)#
+

Version

+

Onnx name: ThresholdedRelu

+

This version of the operator has been available since +version 10.

+

Summary

+

ThresholdedRelu takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the rectified linear function, y = x for x > alpha, y = 0 otherwise, +is applied to the tensor elementwise.
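
In numpy terms the rule reads as follows (an illustration of the formula, not the runtime implementation):

import numpy as np

alpha = 1.0  # default threshold
x = np.array([-1.5, 0.5, 1.0, 2.5], dtype=np.float32)
y = np.where(x > alpha, x, 0.0)
# y == [0., 0., 0., 2.5]: values are kept only where x > alpha (strict inequality).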

+

Attributes

+
    +
  • alpha: Threshold value Default value is +name: "alpha" f: 1.0 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxThresholdedRelu_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxThresholdedRelu_10(*args, **kwargs)#
+

Version

+

Onnx name: ThresholdedRelu

+

This version of the operator has been available since +version 10.

+

Summary

+

ThresholdedRelu takes one input data (Tensor<T>) and produces one output data +(Tensor<T>) where the rectified linear function, y = x for x > alpha, y = 0 otherwise, +is applied to the tensor elementwise.

+

Attributes

+
    +
  • alpha: Threshold value Default value is +name: "alpha" f: 1.0 type: FLOAT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: Output tensor

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
+
+ +
+
+
+
+

OnnxTile#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTile(*args, **kwargs)#
+

Version

+

Onnx name: Tile

+

This version of the operator has been available since +version 13.

+

Summary

+

Constructs a tensor by tiling a given tensor. +This is the same as function tile in Numpy, but no broadcast. +For example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]]
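
The example above can be checked directly against numpy.tile, which applies the same repetition rule:

import numpy as np

A = np.array([[1, 2], [3, 4]])
# repeats = [1, 2]: output_dim[i] = input_dim[i] * repeats[i]
print(np.tile(A, (1, 2)))
# [[1 2 1 2]
#  [3 4 3 4]]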

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor of any shape.

  • +
  • repeats (heterogeneous)T1: 1D int64 tensor of the same length as input’s dimension number, includes numbers of repeated copies along input’s dimensions.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of the same dimensions and type as tensor input. output_dim[i] = input_dim[i] * repeats[i]

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • T1 tensor(int64): Constrain repeat’s type to int64 tensors.

  • +
+
+ +
+
+
+
+

OnnxTile_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTile_1(*args, **kwargs)#
+

Version

+

Onnx name: Tile

+

This version of the operator has been available since +version 1.

+

Summary

+

Repeat the elements of a tensor along an axis.

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor of any shape.

  • +
  • tiles (heterogeneous)T: Number of repeated copies to make of the input tensor.

  • +
  • axis (heterogeneous)T: Axis along which to repeat.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of same shape and type as input.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input types to float tensors.

  • +
  • T1 tensor(int64): Constrain tiles and axis’s type to int64 tensors.

  • +
+
+ +
+
+
+
+

OnnxTile_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTile_13(*args, **kwargs)#
+

Version

+

Onnx name: Tile

+

This version of the operator has been available since +version 13.

+

Summary

+

Constructs a tensor by tiling a given tensor. +This is the same as function tile in Numpy, but no broadcast. +For example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]]

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor of any shape.

  • +
  • repeats (heterogeneous)T1: 1D int64 tensor of the same length as input’s dimension number, includes numbers of repeated copies along input’s dimensions.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of the same dimensions and type as tensor input. output_dim[i] = input_dim[i] * repeats[i]

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • T1 tensor(int64): Constrain repeat’s type to int64 tensors.

  • +
+
+ +
+
+
+
+

OnnxTile_6#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTile_6(*args, **kwargs)#
+

Version

+

Onnx name: Tile

+

This version of the operator has been available since +version 6.

+

Summary

+

Constructs a tensor by tiling a given tensor. +This is the same as function tile in Numpy, but no broadcast. +For example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]]

+

Inputs

+
    +
  • input (heterogeneous)T: Input tensor of any shape.

  • +
  • repeats (heterogeneous)T1: 1D int64 tensor of the same length as input’s dimension number, includes numbers of repeated copies along input’s dimensions.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of the same dimensions and type as tensor input. output_dim[i] = input_dim[i] * repeats[i]

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
  • T1 tensor(int64): Constrain repeat’s type to int64 tensors.

  • +
+
+ +
+
+
+
+

OnnxTopK#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTopK(*args, **kwargs)#
+

Version

+

Onnx name: TopK

+

This version of the operator has been available since +version 11.

+

Summary

+

Retrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of +shape [a_1, a_2, …, a_n, r] and integer argument k, return two outputs:

+
    +
  • Value tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] +which contains the values of the top k elements along the specified axis

  • +
  • Index tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which +contains the indices of the top k elements (original indices from the input +tensor).

  • +
  • If “largest” is 1 (the default value) then the k largest elements are returned.

  • +
  • If “sorted” is 1 (the default value) then the resulting k elements will be sorted.

  • +
  • If “sorted” is 0, order of returned ‘Values’ and ‘Indices’ are undefined.

  • +
+

Given two equivalent values, this operator uses the indices along the axis as +a tiebreaker. That is, the element with the lower index will appear first.
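
A small numpy sketch of this behaviour with sorted=1 and largest=1 (an illustration of the semantics, not the runtime implementation; the stable sort reproduces the lower-index tiebreak):

import numpy as np

def topk(x, k, axis=-1, largest=True):
    # Stable argsort so ties are resolved in favour of the lower index.
    order = np.argsort(-x if largest else x, axis=axis, kind="stable")
    idx = np.take(order, np.arange(k), axis=axis)
    values = np.take_along_axis(x, idx, axis=axis)
    return values, idx

x = np.array([[3., 1., 2., 5.], [1., 3., 3., 2.]], dtype=np.float32)
vals, idx = topk(x, k=2, axis=1)
# vals -> [[5., 3.], [3., 3.]]   idx -> [[3, 0], [1, 2]]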

+

Attributes

+
    +
  • axis: Dimension on which to do the sort. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is +name: "axis" i: -1 type: INT

  • +
  • largest: Whether to return the top-K largest or smallest elements. Default value is +name: "largest" i: 1 type: INT

  • +
  • sorted: Whether to return the elements in sorted order. Default value is +name: "sorted" i: 1 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_n, r]

  • +
  • K (heterogeneous)tensor(int64): A 1-D tensor containing a single positive value corresponding to the number of top elements to retrieve

  • +
+

Outputs

+
    +
  • Values (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing top K values from the input tensor

  • +
  • Indices (heterogeneous)I: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing the corresponding input tensor indices for the top K values.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to numeric tensors.

  • +
  • I tensor(int64): Constrain index tensor to int64

  • +
+
+ +
+
+
+
+

OnnxTopK_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTopK_1(*args, **kwargs)#
+

Version

+

Onnx name: TopK

+

This version of the operator has been available since +version 1.

+

Summary

+

Retrieve the top-K elements along a specified axis. Given an input tensor of shape [a_1, a_2, …, a_n, r] and integer argument k, return two outputs:

  • Value tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which contains the values of the top k elements along the specified axis

  • Index tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which contains the indices of the top k elements (original indices from the input tensor).

Given two equivalent values, this operator uses the indices along the axis as a tiebreaker. That is, the element with the lower index will appear first.

+
+
+

Attributes

+
    +
  • axis: Dimension on which to do the sort. Default value is +name: "axis" i: -1 type: INT

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_n, r]

  • +
+

Outputs

+
    +
  • Values (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing top K values from the input tensor

  • +
  • Indices (heterogeneous)I: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing the corresponding input tensor indices for the top K values.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • I tensor(int64): Constrain index tensor to int64

  • +
+
+ +
+
+
+
+

OnnxTopK_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTopK_10(*args, **kwargs)#
+

Version

+

Onnx name: TopK

+

This version of the operator has been available since +version 10.

+

Summary

+

Retrieve the top-K elements along a specified axis. Given an input tensor of shape [a_1, a_2, …, a_n, r] and integer argument k, return two outputs:

  • Value tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which contains the values of the top k elements along the specified axis

  • Index tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which contains the indices of the top k elements (original indices from the input tensor).

Given two equivalent values, this operator uses the indices along the axis as a tiebreaker. That is, the element with the lower index will appear first.

+
+
+

Attributes

+
    +
  • axis: Dimension on which to do the sort. Default value is +name: "axis" i: -1 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_n, r]

  • +
  • K (heterogeneous)tensor(int64): A 1-D tensor containing a single positive value corresponding to the number of top elements to retrieve

  • +
+

Outputs

+
    +
  • Values (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing top K values from the input tensor

  • +
  • Indices (heterogeneous)I: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing the corresponding input tensor indices for the top K values.

  • +
+

Type Constraints

+
    +
  • T tensor(float16), tensor(float), tensor(double): Constrain input and output types to float tensors.

  • +
  • I tensor(int64): Constrain index tensor to int64

  • +
+
+ +
+
+
+
+

OnnxTopK_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTopK_11(*args, **kwargs)#
+

Version

+

Onnx name: TopK

+

This version of the operator has been available since +version 11.

+

Summary

+

Retrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of +shape [a_1, a_2, …, a_n, r] and integer argument k, return two outputs:

+
    +
  • Value tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] +which contains the values of the top k elements along the specified axis

  • +
  • Index tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] which +contains the indices of the top k elements (original indices from the input +tensor).

  • +
  • If “largest” is 1 (the default value) then the k largest elements are returned.

  • +
  • If “sorted” is 1 (the default value) then the resulting k elements will be sorted.

  • +
  • If “sorted” is 0, order of returned ‘Values’ and ‘Indices’ are undefined.

  • +
+

Given two equivalent values, this operator uses the indices along the axis as +a tiebreaker. That is, the element with the lower index will appear first.

+

Attributes

+
    +
  • axis: Dimension on which to do the sort. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). Default value is +name: "axis" i: -1 type: INT

  • +
  • largest: Whether to return the top-K largest or smallest elements. Default value is +name: "largest" i: 1 type: INT

  • +
  • sorted: Whether to return the elements in sorted order. Default value is +name: "sorted" i: 1 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_n, r]

  • +
  • K (heterogeneous)tensor(int64): A 1-D tensor containing a single positive value corresponding to the number of top elements to retrieve

  • +
+

Outputs

+
    +
  • Values (heterogeneous)T: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing top K values from the input tensor

  • +
  • Indices (heterogeneous)I: Tensor of shape [a_1, a_2, …, a_{axis-1}, k, a_{axis+1}, … a_n] containing the corresponding input tensor indices for the top K values.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double): Constrain input and output types to numeric tensors.

  • +
  • I tensor(int64): Constrain index tensor to int64

  • +
+
+ +
+
+
+
+

OnnxTranspose#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTranspose(*args, **kwargs)#
+

Version

+

Onnx name: Transpose

+

This version of the operator has been available since +version 13.

+

Summary

+

Transpose the input tensor similar to numpy.transpose. For example, when +perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape +will be (2, 1, 3).
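
The example in the summary can be checked with numpy.transpose:

import numpy as np

x = np.zeros((1, 2, 3))
print(np.transpose(x, (1, 0, 2)).shape)  # (2, 1, 3)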

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • transposed (heterogeneous)T: Transposed output.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxTranspose_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTranspose_1(*args, **kwargs)#
+

Version

+

Onnx name: Transpose

+

This version of the operator has been available since +version 1.

+

Summary

+

Transpose the input tensor similar to numpy.transpose. For example, when +perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape +will be (2, 1, 3).

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • transposed (heterogeneous)T: Transposed output.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxTranspose_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTranspose_13(*args, **kwargs)#
+

Version

+

Onnx name: Transpose

+

This version of the operator has been available since +version 13.

+

Summary

+

Transpose the input tensor similar to numpy.transpose. For example, when +perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape +will be (2, 1, 3).

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • data (heterogeneous)T: An input tensor.

  • +
+

Outputs

+
    +
  • transposed (heterogeneous)T: Transposed output.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxTreeEnsembleClassifier#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleClassifier(*args, **kwargs)#
+

Version

+

Onnx name: TreeEnsembleClassifier

+

This version of the operator has been available since +version 3 of domain ai.onnx.ml.

+

Summary

+

Tree Ensemble classifier. Returns the top class for each of N inputs.

+

The attributes named ‘nodes_X’ form a sequence of tuples, associated by +index into the sequences, which must all be of equal length. These tuples +define the nodes.

+

Similarly, all fields prefixed with ‘class_’ are tuples of votes at the leaves. +A leaf may have multiple votes, where each vote is weighted by +the associated class_weights index.

+

One and only one of classlabels_strings or classlabels_int64s will be defined. The class_ids are indices into this list. All fields ending with _as_tensor can be used instead of the same parameter without the suffix if the element type is double and not float.
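
In practice this node is produced by converting a scikit-learn tree ensemble rather than being assembled attribute by attribute. A minimal sketch (the dataset and hyperparameters are arbitrary choices for the example):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from skl2onnx import to_onnx

X, y = load_iris(return_X_y=True)
clf = RandomForestClassifier(n_estimators=5, max_depth=3).fit(X, y)
onx = to_onnx(clf, X[:1].astype(np.float32))
# The resulting graph contains a TreeEnsembleClassifier node (ai.onnx.ml domain).
print([node.op_type for node in onx.graph.node])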

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • post_transform: Indicates the transform to apply to the score. <br> One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT.’ Default value is +name: "post_transform" s: "NONE" type: STRING

  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: Input of shape [N,F]

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: N, Top class for each point

  • +
  • Z (heterogeneous)tensor(float): The class score for each class, for each point, a tensor of shape [N,E].

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type.

  • +
  • T2 tensor(string), tensor(int64): The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used.

  • +
+
+ +
+
+
+
+

OnnxTreeEnsembleClassifier_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleClassifier_1(*args, **kwargs)#
+

Version

+

Onnx name: TreeEnsembleClassifier

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Tree Ensemble classifier. Returns the top class for each of N inputs.

+

The attributes named ‘nodes_X’ form a sequence of tuples, associated by +index into the sequences, which must all be of equal length. These tuples +define the nodes.

+

Similarly, all fields prefixed with ‘class_’ are tuples of votes at the leaves. +A leaf may have multiple votes, where each vote is weighted by +the associated class_weights index.

+

One and only one of classlabels_strings or classlabels_int64s +will be defined. The class_ids are indices into this list.

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • post_transform: Indicates the transform to apply to the score. <br> One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT.’ Default value is +name: "post_transform" s: "NONE" type: STRING

  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: Input of shape [N,F]

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: N, Top class for each point

  • +
  • Z (heterogeneous)tensor(float): The class score for each class, for each point, a tensor of shape [N,E].

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type.

  • +
  • T2 tensor(string), tensor(int64): The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used.

  • +
+
+ +
+
+
+
+

OnnxTreeEnsembleClassifier_3#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleClassifier_3(*args, **kwargs)#
+

Version

+

Onnx name: TreeEnsembleClassifier

+

This version of the operator has been available since +version 3 of domain ai.onnx.ml.

+

Summary

+

Tree Ensemble classifier. Returns the top class for each of N inputs.

+

The attributes named ‘nodes_X’ form a sequence of tuples, associated by +index into the sequences, which must all be of equal length. These tuples +define the nodes.

+

Similarly, all fields prefixed with ‘class_’ are tuples of votes at the leaves. +A leaf may have multiple votes, where each vote is weighted by +the associated class_weights index.

+

One and only one of classlabels_strings or classlabels_int64s will be defined. The class_ids are indices into this list. All fields ending with _as_tensor can be used instead of the same parameter without the suffix if the element type is double and not float.

+

Attributes

+
    +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • post_transform: Indicates the transform to apply to the score. <br> One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT.’ Default value is +name: "post_transform" s: "NONE" type: STRING

  • +
+

Inputs

+
    +
  • X (heterogeneous)T1: Input of shape [N,F]

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T2: N, Top class for each point

  • +
  • Z (heterogeneous)tensor(float): The class score for each class, for each point, a tensor of shape [N,E].

  • +
+

Type Constraints

+
    +
  • T1 tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type.

  • +
  • T2 tensor(string), tensor(int64): The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used.

  • +
+
+ +
+
+
+
+

OnnxTreeEnsembleRegressor#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleRegressor(*args, **kwargs)#
+

Version

+

Onnx name: TreeEnsembleRegressor

+

This version of the operator has been available since +version 3 of domain ai.onnx.ml.

+

Summary

+

Tree Ensemble regressor. Returns the regressed values for each input in N.

+

All args with nodes_ are fields of a tuple of tree nodes, and +it is assumed they are the same length, and an index i will decode the +tuple across these inputs. Each node id can appear only once +for each tree id.

+

All fields prefixed with target_ are tuples of votes at the leaves.

+

A leaf may have multiple votes, where each vote is weighted by +the associated target_weights index.

+

All fields ending with _as_tensor can be used instead of the same parameter without the suffix if the element type is double and not float. All trees must have their node ids start at 0 and increment by 1.

+

Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF
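
As with the classifier, this node is usually obtained by converting a scikit-learn model. A minimal sketch (dataset and hyperparameters are arbitrary choices for the example):

import numpy as np
from sklearn.datasets import load_diabetes
from sklearn.ensemble import RandomForestRegressor
from skl2onnx import to_onnx

X, y = load_diabetes(return_X_y=True)
reg = RandomForestRegressor(n_estimators=3, max_depth=3).fit(X, y)
onx = to_onnx(reg, X[:1].astype(np.float32))
print([node.op_type for node in onx.graph.node])  # expect a TreeEnsembleRegressor node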

+

Attributes

+
    +
  • aggregate_function: Defines how to aggregate leaf values within a target. <br>One of ‘AVERAGE,’ ‘SUM,’ ‘MIN,’ ‘MAX.’ Default value is +name: "aggregate_function" s: "SUM" type: STRING

  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • post_transform: Indicates the transform to apply to the score. <br>One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’ Default value is +name: "post_transform" s: "NONE" type: STRING

  • +
  • +
  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input of shape [N,F]

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(float): N classes

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type.

  • +
+
+ +
+
+
+
+

OnnxTreeEnsembleRegressor_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleRegressor_1(*args, **kwargs)#
+

Version

+

Onnx name: TreeEnsembleRegressor

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Tree Ensemble regressor. Returns the regressed values for each input in N.

+

All args with nodes_ are fields of a tuple of tree nodes, and +it is assumed they are the same length, and an index i will decode the +tuple across these inputs. Each node id can appear only once +for each tree id.

+

All fields prefixed with target_ are tuples of votes at the leaves.

+

A leaf may have multiple votes, where each vote is weighted by +the associated target_weights index.

+

All trees must have their node ids start at 0 and increment by 1.

+

Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF

+

Attributes

+
    +
  • aggregate_function: Defines how to aggregate leaf values within a target. <br>One of ‘AVERAGE,’ ‘SUM,’ ‘MIN,’ ‘MAX.’ Default value is +name: "aggregate_function" s: "SUM" type: STRING

  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • post_transform: Indicates the transform to apply to the score. <br>One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’ Default value is +name: "post_transform" s: "NONE" type: STRING

  • +
  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input of shape [N,F]

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(float): N classes

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type.

  • +
+
+ +
+
+
+
+

OnnxTreeEnsembleRegressor_3#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTreeEnsembleRegressor_3(*args, **kwargs)#
+

Version

+

Onnx name: TreeEnsembleRegressor

+

This version of the operator has been available since +version 3 of domain ai.onnx.ml.

+

Summary

+

Tree Ensemble regressor. Returns the regressed values for each input in N.

+

All args with nodes_ are fields of a tuple of tree nodes, and +it is assumed they are the same length, and an index i will decode the +tuple across these inputs. Each node id can appear only once +for each tree id.

+

All fields prefixed with target_ are tuples of votes at the leaves.

+

A leaf may have multiple votes, where each vote is weighted by +the associated target_weights index.

+

All fields ending with _as_tensor can be used instead of the same parameter without the suffix if the element type is double and not float. All trees must have their node ids start at 0 and increment by 1.

+

Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF

+

Attributes

+
    +
  • aggregate_function: Defines how to aggregate leaf values within a target. <br>One of ‘AVERAGE,’ ‘SUM,’ ‘MIN,’ ‘MAX.’ Default value is +name: "aggregate_function" s: "SUM" type: STRING

  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • +
  • post_transform: Indicates the transform to apply to the score. <br>One of ‘NONE,’ ‘SOFTMAX,’ ‘LOGISTIC,’ ‘SOFTMAX_ZERO,’ or ‘PROBIT’ Default value is +name: "post_transform" s: "NONE" type: STRING

  • +
  • +
  • +
  • +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: Input of shape [N,F]

  • +
+

Outputs

+
    +
  • Y (heterogeneous)tensor(float): N classes

  • +
+

Type Constraints

+
    +
  • T tensor(float), tensor(double), tensor(int64), tensor(int32): The input type must be a tensor of a numeric type.

  • +
+
+ +
+
+
+
+

OnnxTrilu#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTrilu(*args, **kwargs)#
+

Version

+

Onnx name: Trilu

+

This version of the operator has been available since +version 14.

+

Summary

+

Given a 2-D matrix or batches of 2-D matrices, returns the upper or lower triangular part of the tensor(s). The attribute “upper” determines whether the upper or lower part is retained. If set to true, the upper triangular matrix is retained. Lower triangular matrix is retained otherwise. Default value for the “upper” attribute is true.

Trilu takes one input tensor of shape [*, N, M], where * is zero or more batch dimensions. The upper triangular part consists of the elements on and above the given diagonal (k). The lower triangular part consists of elements on and below the diagonal. All other elements in the matrix are set to zero. If k = 0, the triangular part on and above/below the main diagonal is retained. If upper is set to true, a positive k retains the upper triangular matrix excluding the main diagonal and (k-1) diagonals above it. A negative k value retains the main diagonal and |k| diagonals below it. If upper is set to false, a positive k retains the lower triangular matrix including the main diagonal and k diagonals above it. A negative k value excludes the main diagonal and (|k|-1) diagonals below it.

+
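
The retention rules above coincide with numpy.triu / numpy.tril for the same k (a quick illustration only):

import numpy as np

A = np.arange(16).reshape(4, 4)
print(np.triu(A, k=1))   # upper=1, k=1: keeps the diagonals strictly above the main one
print(np.tril(A, k=-1))  # upper=0, k=-1: keeps the diagonals strictly below the main one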

Attributes

+
    +
  • upper: Boolean. Indicates whether upper or lower part of matrix is retained. Default is true. Default value is +name: "upper" i: 1 type: INT

  • +
+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • input (heterogeneous)T: Input tensor of rank 2 or higher.

  • +
  • k (optional, heterogeneous)tensor(int64): A 0-D tensor containing a single value corresponding to the number diagonals above or below the main diagonal to exclude or include. Default value is 0 if it’s not specified.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of the same type and shape as the input tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxTrilu_14#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxTrilu_14(*args, **kwargs)#
+

Version

+

Onnx name: Trilu

+

This version of the operator has been available since +version 14.

+

Summary

+

Given a 2-D matrix or batches of 2-D matrices, returns the upper or lower triangular part of the tensor(s). The attribute “upper” determines whether the upper or lower part is retained. If set to true, the upper triangular matrix is retained. Lower triangular matrix is retained otherwise. Default value for the “upper” attribute is true.

Trilu takes one input tensor of shape [*, N, M], where * is zero or more batch dimensions. The upper triangular part consists of the elements on and above the given diagonal (k). The lower triangular part consists of elements on and below the diagonal. All other elements in the matrix are set to zero. If k = 0, the triangular part on and above/below the main diagonal is retained. If upper is set to true, a positive k retains the upper triangular matrix excluding the main diagonal and (k-1) diagonals above it. A negative k value retains the main diagonal and |k| diagonals below it. If upper is set to false, a positive k retains the lower triangular matrix including the main diagonal and k diagonals above it. A negative k value excludes the main diagonal and (|k|-1) diagonals below it.

+

Attributes

+
    +
  • upper: Boolean. Indicates whether upper or lower part of matrix is retained. Default is true. Default value is +name: "upper" i: 1 type: INT

  • +
+

Inputs

+

Between 1 and 2 inputs.

+
    +
  • input (heterogeneous)T: Input tensor of rank 2 or higher.

  • +
  • k (optional, heterogeneous)tensor(int64): A 0-D tensor containing a single value corresponding to the number diagonals above or below the main diagonal to exclude or include. Default value is 0 if it’s not specified.

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Output tensor of the same type and shape as the input tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxUnique#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxUnique(*args, **kwargs)#
+

Version

+

Onnx name: Unique

+

This version of the operator has been available since +version 11.

+

Summary

+

Find the unique elements of a tensor. When an optional attribute ‘axis’ is provided, unique subtensors sliced along the ‘axis’ are returned. +Otherwise the input tensor is flattened and unique values of the flattened tensor are returned.

+

This operator returns the unique values or sliced unique subtensors of the input tensor and three optional outputs. The first output tensor ‘Y’ contains all unique values or subtensors of the input. The second optional output tensor ‘indices’ contains the indices of the first occurrence of each element of ‘Y’ in ‘X’. The third optional output tensor ‘inverse_indices’ contains, for each element of ‘X’, its corresponding index in ‘Y’. The fourth optional output tensor ‘counts’ contains the count of each element of ‘Y’ in the input.

+

Outputs are either sorted in ascending order or optionally in the order of the first occurrence of the values in the input.

+

https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html

+

Example 1:

input_X = [2, 1, 1, 3, 4, 3]
attribute_sorted = 0
attribute_axis = None
output_Y = [2, 1, 3, 4]
output_indices = [0, 1, 3, 4]
output_inverse_indices = [0, 1, 1, 2, 3, 2]
output_counts = [1, 2, 2, 1]

Example 2:

input_X = [[1, 3], [2, 3]]
attribute_sorted = 1
attribute_axis = None
output_Y = [1, 2, 3]
output_indices = [0, 2, 1]
output_inverse_indices = [0, 2, 1, 2]
output_counts = [1, 1, 2]

Example 3:

input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]
attribute_sorted = 1
attribute_axis = 0
output_Y = [[1, 0, 0], [2, 3, 4]]
output_indices = [0, 2]
output_inverse_indices = [0, 0, 1]
output_counts = [2, 1]

Example 4:

input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
           [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]
attribute_sorted = 1
attribute_axis = 1

intermediate data are presented below for better understanding: there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):

A: [[1, 1], [1, 1]],
   [[0, 1], [0, 1]],
   [[2, 1], [2, 1]],
   [[0, 1], [0, 1]].

there are 3 unique subtensors:

[[1, 1], [1, 1]],
[[0, 1], [0, 1]],
[[2, 1], [2, 1]].

sorted unique subtensors:

B: [[0, 1], [0, 1]],
   [[1, 1], [1, 1]],
   [[2, 1], [2, 1]].

output_Y is constructed from B:

[[[0. 1.], [1. 1.], [2. 1.]],
 [[0. 1.], [1. 1.], [2. 1.]]]

output_indices is to map from B to A:

[1, 0, 2]

output_inverse_indices is to map from A to B:

[1, 0, 2, 0]

output_counts:

[2, 1, 1]
+
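
Example 3 above can be cross-checked with numpy.unique, which exposes the same four outputs:

import numpy as np

x = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
y, indices, inverse, counts = np.unique(
    x, axis=0, return_index=True, return_inverse=True, return_counts=True)
# y == [[1, 0, 0], [2, 3, 4]], indices == [0, 2],
# inverse == [0, 0, 1], counts == [2, 1]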
+
+

Attributes

+
    +
  • +
  • sorted: (Optional) Whether to sort the unique elements in ascending order before returning as output. Must be one of 0, or 1 (default). Default value is +name: "sorted" i: 1 type: INT

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: A N-D input tensor that is to be processed.

  • +
+

Outputs

+

Between 1 and 4 outputs.

+
    +
  • Y (heterogeneous)T: A tensor of the same type as ‘X’ containing all the unique values or subtensors sliced along a provided ‘axis’ in ‘X’, either sorted or maintained in the same order they occur in input ‘X’

  • +
  • indices (optional, heterogeneous)tensor(int64): A 1-D INT64 tensor containing indices of ‘Y’ elements’ first occurrence in ‘X’. When ‘axis’ is provided, it contains indices to subtensors in input ‘X’ on the ‘axis’. When ‘axis’ is not provided, it contains indices to values in the flattened input tensor.

  • +
  • inverse_indices (optional, heterogeneous)tensor(int64): A 1-D INT64 tensor containing, for elements of ‘X’, its corresponding indices in ‘Y’. When ‘axis’ is provided, it contains indices to subtensors in output ‘Y’ on the ‘axis’. When ‘axis’ is not provided, it contains indices to values in output ‘Y’.

  • +
  • counts (optional, heterogeneous)tensor(int64): A 1-D INT64 tensor containing the count of each element of ‘Y’ in input ‘X’

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input can be of any tensor type.

  • +
+
+ +
+
+
+
+

OnnxUnique_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxUnique_11(*args, **kwargs)#
+

Version

+

Onnx name: Unique

+

This version of the operator has been available since +version 11.

+

Summary

+

Find the unique elements of a tensor. When an optional attribute ‘axis’ is provided, unique subtensors sliced along the ‘axis’ are returned. +Otherwise the input tensor is flattened and unique values of the flattened tensor are returned.

+

This operator returns the unique values or sliced unique subtensors of the input tensor and three optional outputs. The first output tensor ‘Y’ contains all unique values or subtensors of the input. The second optional output tensor ‘indices’ contains the indices of the first occurrence of each element of ‘Y’ in ‘X’. The third optional output tensor ‘inverse_indices’ contains, for each element of ‘X’, its corresponding index in ‘Y’. The fourth optional output tensor ‘counts’ contains the count of each element of ‘Y’ in the input.

+

Outputs are either sorted in ascending order or optionally in the order of the first occurrence of the values in the input.

+

https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html

+

Example 1:

+
input_X = [2, 1, 1, 3, 4, 3]
+attribute_sorted = 0
+attribute_axis = None
+output_Y = [2, 1, 3, 4]
+output_indices = [0, 1, 3, 4]
+output_inverse_indices = [0, 1, 1, 2, 3, 2]
+output_counts = [1, 2, 2, 1]
+
+
+

Example 2:

+
input_X = [[1, 3], [2, 3]]
+attribute_sorted = 1
+attribute_axis = None
+output_Y = [1, 2, 3]
+output_indices = [0, 2, 1]
+output_inverse_indices = [0, 2, 1, 2]
+output_counts = [1, 1, 2]
+
+
+

Example 3:

+
input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]
+attribute_sorted = 1
+attribute_axis = 0
+output_Y = [[1, 0, 0], [2, 3, 4]]
+output_indices = [0, 2]
+output_inverse_indices = [0, 0, 1]
+output_counts = [2, 1]
+
+
+

Example 4:

+
input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
+            [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]
+attribute_sorted = 1
+attribute_axis = 1
+
+
+

intermediate data are presented below for better understanding: +there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):

+
A: [[1, 1], [1, 1]],
+   [[0, 1], [0, 1]],
+   [[2, 1], [2, 1]],
+   [[0, 1], [0, 1]].
+
+
+

there are 3 unique subtensors:

+
[[1, 1], [1, 1]],
+[[0, 1], [0, 1]],
+[[2, 1], [2, 1]].
+
+
+

sorted unique subtensors:

+
B: [[0, 1], [0, 1]],
+   [[1, 1], [1, 1]],
+   [[2, 1], [2, 1]].
+
+
+

output_Y is constructed from B:

+
[[[0. 1.], [1. 1.], [2. 1.]],
+ [[0. 1.], [1. 1.], [2. 1.]]]
+
+
+

output_indices is to map from B to A:

+
[1, 0, 2]
+
+
+

output_inverse_indices is to map from A to B:

+
[1, 0, 2, 0]
+
+
+

output_counts:

+
[2, 1, 1]
+
+
+
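The Unique operator mirrors numpy.unique, so the walkthrough above can be reproduced directly with numpy. The snippet below is a small sketch that is not part of the original specification; exact printing may vary slightly across numpy versions.

import numpy as np

input_x = np.array([[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
                    [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]])

# sorted=1, axis=1: unique subtensors sliced along axis 1, in ascending order
y, indices, inverse_indices, counts = np.unique(
    input_x, axis=1, return_index=True, return_inverse=True, return_counts=True)

print(y.shape)           # (2, 3, 2)  -> output_Y above
print(indices)           # [1 0 2]    -> output_indices
print(inverse_indices)   # [1 0 2 0]  -> output_inverse_indices
print(counts)            # [2 1 1]    -> output_counts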

Attributes

+
    +
  • +
  • sorted: (Optional) Whether to sort the unique elements in ascending order before returning as output. Must be either 0 or 1. Default value is 1 (name: "sorted", i: 1, type: INT).

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: An N-D input tensor that is to be processed.

  • +
+

Outputs

+

Between 1 and 4 outputs.

+
    +
  • Y (heterogeneous)T: A tensor of the same type as ‘X’ containing all the unique values or subtensors sliced along a provided ‘axis’ in ‘X’, either sorted or maintained in the same order they occur in input ‘X’

  • +
  • indices (optional, heterogeneous)tensor(int64): A 1-D INT64 tensor containing indices of ‘Y’ elements’ first occurrence in ‘X’. When ‘axis’ is provided, it contains indices to subtensors in input ‘X’ on the ‘axis’. When ‘axis’ is not provided, it contains indices to values in the flattened input tensor.

  • +
  • inverse_indices (optional, heterogeneous)tensor(int64): A 1-D INT64 tensor containing, for each element of ‘X’, its corresponding index in ‘Y’. When ‘axis’ is provided, it contains indices to subtensors in output ‘Y’ on the ‘axis’. When ‘axis’ is not provided, it contains indices to values in output ‘Y’.

  • +
  • counts (optional, heterogeneous)tensor(int64): A 1-D INT64 tensor containing the count of each element of ‘Y’ in input ‘X’

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Input can be of any tensor type.

  • +
+
+ +
+
+
+
+

OnnxUnsqueeze#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxUnsqueeze(*args, **kwargs)#
+

Version

+

Onnx name: Unsqueeze

+

This version of the operator has been available since +version 13.

+

Summary

+

Insert single-dimensional entries to the shape of an input tensor (data). +Takes one required input axes - which contains a list of dimension indices and this operator will insert a dimension of value 1 into the corresponding index of the output tensor (expanded).

+

For example, given an input tensor (data) of shape [3, 4, 5], then +Unsqueeze(data, axes=[0, 4]) outputs a tensor (expanded) containing same data as data but with shape [1, 3, 4, 5, 1].

+

The input axes should not contain any duplicate entries. It is an error if it contains duplicates. +The rank of the output tensor (output_rank) is the rank of the input tensor (data) plus the number of values in axes. +Each value in axes should be within the (inclusive) range [-output_rank , output_rank - 1]. +The order of values in axes does not matter and can come in any order.

+
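As a hedged illustration of how this operator can be assembled with the algebra API (the input name 'X', the sample array and the opset below are assumptions, not part of the official documentation), note that since opset 13 the axes are passed as a second input rather than as an attribute.

import numpy as np
from skl2onnx.algebra.onnx_ops import OnnxUnsqueeze

x = np.arange(60, dtype=np.float32).reshape((3, 4, 5))
axes = np.array([0, 4], dtype=np.int64)   # axes is an input since opset 13

node = OnnxUnsqueeze('X', axes, op_version=13, output_names=['Y'])
onx = node.to_onnx({'X': x})

# Running the graph with onnxruntime should return a (1, 3, 4, 5, 1) tensor:
# import onnxruntime as rt
# sess = rt.InferenceSession(onx.SerializeToString())
# print(sess.run(None, {'X': x})[0].shape)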

Inputs

+
    +
  • data (heterogeneous)T: Original tensor

  • +
  • axes (heterogeneous)tensor(int64): List of integers indicating the dimensions to be inserted. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(expanded).

  • +
+

Outputs

+
    +
  • expanded (heterogeneous)T: Reshaped tensor with same data as input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxUnsqueeze_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxUnsqueeze_1(*args, **kwargs)#
+

Version

+

Onnx name: Unsqueeze

+

This version of the operator has been available since +version 1.

+

Summary

+

Insert single-dimensional entries to the shape of a tensor. +Takes one required argument axes, a list of dimensions that will be inserted. +Dimension indices in axes are as seen in the output tensor. For example:

+
+

Given a tensor with shape [3, 4, 5], Unsqueeze(tensor, axes=[0, 4]) produces a tensor with shape [1, 3, 4, 5, 1].

+
+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Original tensor

  • +
+

Outputs

+
    +
  • expanded (heterogeneous)T: Reshaped tensor with same data as input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxUnsqueeze_11#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxUnsqueeze_11(*args, **kwargs)#
+

Version

+

Onnx name: Unsqueeze

+

This version of the operator has been available since +version 11.

+

Summary

+

Insert single-dimensional entries to the shape of an input tensor (data). +Takes one required argument axes - which contains a list of dimension indices and this operator will insert a dimension of value 1 into the corresponding index of the output tensor (expanded).

+
+
For example:

Given an input tensor (data) of shape [3, 4, 5], then +Unsqueeze(data, axes=[0, 4]) outputs a tensor (expanded) containing same data as data but with shape [1, 3, 4, 5, 1].

+
+
+

The attribute axes should not contain any duplicate entries. It is an error if it contains duplicates. +The rank of the output tensor (output_rank) is the rank of the input tensor (data) plus the number of values in axes. +Each value in axes should be within the (inclusive) range [-output_rank , output_rank - 1]. +The order of values in axes does not matter and can come in any order.

+

Attributes

+
    +
  • +
+

Inputs

+
    +
  • data (heterogeneous)T: Original tensor

  • +
+

Outputs

+
    +
  • expanded (heterogeneous)T: Reshaped tensor with same data as input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxUnsqueeze_13#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxUnsqueeze_13(*args, **kwargs)#
+

Version

+

Onnx name: Unsqueeze

+

This version of the operator has been available since +version 13.

+

Summary

+

Insert single-dimensional entries to the shape of an input tensor (data). +Takes one required input axes - which contains a list of dimension indices and this operator will insert a dimension of value 1 into the corresponding index of the output tensor (expanded).

+

For example, given an input tensor (data) of shape [3, 4, 5], then +Unsqueeze(data, axes=[0, 4]) outputs a tensor (expanded) containing same data as data but with shape [1, 3, 4, 5, 1].

+

The input axes should not contain any duplicate entries. It is an error if it contains duplicates. +The rank of the output tensor (output_rank) is the rank of the input tensor (data) plus the number of values in axes. +Each value in axes should be within the (inclusive) range [-output_rank , output_rank - 1]. +The order of values in axes does not matter and can come in any order.

+

Inputs

+
    +
  • data (heterogeneous)T: Original tensor

  • +
  • axes (heterogeneous)tensor(int64): List of integers indicating the dimensions to be inserted. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(expanded).

  • +
+

Outputs

+
    +
  • expanded (heterogeneous)T: Reshaped tensor with same data as input.

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxUpsample#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxUpsample(*args, **kwargs)#
+

Version

+

Onnx name: Upsample

+

This version of the operator has been deprecated since +version 10.

+

Summary

+

Upsample the input tensor. +Each dimension value of the output tensor is:

+
+

output_dimension = floor(input_dimension * scale).

+
+
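A tiny sketch of the shape rule stated above; the shapes and scales are made up for the example.

import math

input_shape = (1, 3, 4, 5)       # example input tensor shape
scales = (1.0, 1.0, 2.0, 2.5)    # one scale per dimension, each >= 1

output_shape = tuple(math.floor(d * s) for d, s in zip(input_shape, scales))
print(output_shape)              # (1, 3, 8, 12)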

Attributes

+
    +
  • mode: Two interpolation modes: nearest (default) and linear (including bilinear, trilinear, etc.). Default value is "nearest" (name: "mode", s: "nearest", type: STRING).

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: N-D tensor

  • +
  • scales (heterogeneous)tensor(float): The scale array along each dimension. It takes value greater than or equal to 1. The number of elements of ‘scales’ should be the same as the rank of input ‘X’.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: N-D tensor after resizing

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxUpsample_10#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxUpsample_10(*args, **kwargs)#
+

Version

+

Onnx name: Upsample

+

This version of the operator has been deprecated since +version 10.

+

Summary

+

Upsample the input tensor. +Each dimension value of the output tensor is:

+
+

output_dimension = floor(input_dimension * scale).

+
+

Attributes

+
    +
  • mode: Two interpolation modes: nearest (default) and linear (including bilinear, trilinear, etc.). Default value is "nearest" (name: "mode", s: "nearest", type: STRING).

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: N-D tensor

  • +
  • scales (heterogeneous)tensor(float): The scale array along each dimension. It takes value greater than or equal to 1. The number of elements of ‘scales’ should be the same as the rank of input ‘X’.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: N-D tensor after resizing

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxUpsample_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxUpsample_7(*args, **kwargs)#
+

Version

+

Onnx name: Upsample

+

This version of the operator has been available since +version 7.

+

Summary

+

Upsample the input tensor. +Each dimension value of the output tensor is:

+
+

output_dimension = floor(input_dimension * scale).

+
+

Attributes

+
    +
  • mode: Two interpolation modes: nearest (default) and linear (including bilinear, trilinear, etc.). Default value is "nearest" (name: "mode", s: "nearest", type: STRING).

  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)T: N-D tensor

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: N-D tensor after resizing

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxUpsample_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxUpsample_9(*args, **kwargs)#
+

Version

+

Onnx name: Upsample

+

This version of the operator has been available since +version 9.

+

Summary

+

Upsample the input tensor. +Each dimension value of the output tensor is:

+
+

output_dimension = floor(input_dimension * scale).

+
+

Attributes

+
    +
  • mode: Two interpolation modes: nearest (default) and linear (including bilinear, trilinear, etc.). Default value is "nearest" (name: "mode", s: "nearest", type: STRING).

  • +
+

Inputs

+
    +
  • X (heterogeneous)T: N-D tensor

  • +
  • scales (heterogeneous)tensor(float): The scale array along each dimension. It takes value greater than or equal to 1. The number of elements of ‘scales’ should be the same as the rank of input ‘X’.

  • +
+

Outputs

+
    +
  • Y (heterogeneous)T: N-D tensor after resizing

  • +
+

Type Constraints

+
    +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input ‘X’ and output ‘Y’ to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxWhere#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxWhere(*args, **kwargs)#
+

Version

+

Onnx name: Where

+

This version of the operator has been available since +version 16.

+

Summary

+

Return elements, either from X or Y, depending on condition. +Where behaves like +[numpy.where](https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html) +with three parameters.

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+
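Since the operator behaves like numpy.where with three parameters, a short numpy sketch (the arrays are made up) illustrates the selection and the multidirectional broadcasting.

import numpy as np

condition = np.array([[True, False], [False, True]])
x = np.array([[1, 2], [3, 4]], dtype=np.int64)
y = np.array([[9, 8], [7, 6]], dtype=np.int64)

# elements come from x where condition is True, from y otherwise
print(np.where(condition, x, y))                  # [[1 8] [7 4]]

# condition, X and Y are first broadcast to a common shape
print(np.where(np.array([True, False]), x, 0))    # [[1 0] [3 0]]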

Inputs

+
    +
  • condition (heterogeneous)B: When True (nonzero), yield X, otherwise yield Y

  • +
  • X (heterogeneous)T: values selected at indices where condition is True

  • +
  • Y (heterogeneous)T: values selected at indices where condition is False

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of shape equal to the broadcasted shape of condition, X, and Y.

  • +
+

Type Constraints

+
    +
  • B tensor(bool): Constrain to boolean tensors.

  • +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types (including bfloat).

  • +
+
+ +
+
+
+
+

OnnxWhere_16#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxWhere_16(*args, **kwargs)#
+

Version

+

Onnx name: Where

+

This version of the operator has been available since +version 16.

+

Summary

+

Return elements, either from X or Y, depending on condition. +Where behaves like +[numpy.where](https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html) +with three parameters.

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • condition (heterogeneous)B: When True (nonzero), yield X, otherwise yield Y

  • +
  • X (heterogeneous)T: values selected at indices where condition is True

  • +
  • Y (heterogeneous)T: values selected at indices where condition is False

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of shape equal to the broadcasted shape of condition, X, and Y.

  • +
+

Type Constraints

+
    +
  • B tensor(bool): Constrain to boolean tensors.

  • +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types (including bfloat).

  • +
+
+ +
+
+
+
+

OnnxWhere_9#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxWhere_9(*args, **kwargs)#
+

Version

+

Onnx name: Where

+

This version of the operator has been available since +version 9.

+

Summary

+

Return elements, either from X or Y, depending on condition. +Where behaves like +[numpy.where](https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html) +with three parameters.

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • condition (heterogeneous)B: When True (nonzero), yield X, otherwise yield Y

  • +
  • X (heterogeneous)T: values selected at indices where condition is True

  • +
  • Y (heterogeneous)T: values selected at indices where condition is False

  • +
+

Outputs

+
    +
  • output (heterogeneous)T: Tensor of shape equal to the broadcasted shape of condition, X, and Y.

  • +
+

Type Constraints

+
    +
  • B tensor(bool): Constrain to boolean tensors.

  • +
  • T tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128): Constrain input and output types to all tensor types.

  • +
+
+ +
+
+
+
+

OnnxXor#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxXor(*args, **kwargs)#
+

Version

+

Onnx name: Xor

+

This version of the operator has been available since +version 7.

+

Summary

+

Returns the tensor resulting from performing the xor logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+
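A minimal numpy sketch (the arrays are made up) of the element-wise xor with Numpy-style broadcasting described above.

import numpy as np

a = np.array([[True, True], [False, False]])
b = np.array([True, False])      # broadcast against every row of a

print(np.logical_xor(a, b))      # [[False  True]
                                 #  [ True False]]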

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool): Constrain input to boolean tensor.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxXor_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxXor_1(*args, **kwargs)#
+

Version

+

Onnx name: Xor

+

This version of the operator has been available since +version 1.

+

Summary

+

Returns the tensor resulting from performing the xor logical operation elementwise on the input tensors A and B.

+

If broadcasting is enabled, the right-hand-side argument will be broadcasted +to match the shape of left-hand-side argument. See the doc of Add for a +detailed description of the broadcasting rules.

+

Attributes

+
    +
  • +
  • broadcast: Enable broadcasting. Default value is 0 (name: "broadcast", i: 0, type: INT).

  • +
+

Inputs

+
    +
  • A (heterogeneous)T: Left input tensor for the logical operator.

  • +
  • B (heterogeneous)T: Right input tensor for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool): Constrain input to boolean tensor.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxXor_7#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxXor_7(*args, **kwargs)#
+

Version

+

Onnx name: Xor

+

This version of the operator has been available since +version 7.

+

Summary

+

Returns the tensor resulting from performing the xor logical operation elementwise on the input tensors A and B (with Numpy-style broadcasting support).

+

This operator supports multidirectional (i.e., Numpy-style) broadcasting; for more details please check Broadcasting in ONNX.

+

Inputs

+
    +
  • A (heterogeneous)T: First input operand for the logical operator.

  • +
  • B (heterogeneous)T: Second input operand for the logical operator.

  • +
+

Outputs

+
    +
  • C (heterogeneous)T1: Result tensor.

  • +
+

Type Constraints

+
    +
  • T tensor(bool): Constrain input to boolean tensor.

  • +
  • T1 tensor(bool): Constrain output to boolean tensor.

  • +
+
+ +
+
+
+
+

OnnxZipMap#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxZipMap(*args, **kwargs)#
+

Version

+

Onnx name: ZipMap

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Creates a map from the input and the attributes.

+

The values are provided by the input tensor, while the keys are specified by the attributes. +Must provide keys in either classlabels_strings or classlabels_int64s (but not both).

+

The columns of the tensor correspond one-by-one to the keys specified by the attributes. There must be as many columns as keys.

+
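In practice this operator is what makes a converted scikit-learn classifier return its probabilities as a list of dictionaries. The following is a hedged sketch (the model and data are made up); the 'zipmap' option is the one exposed by the sklearn-onnx converters to keep or drop the operator.

import numpy as np
from sklearn.linear_model import LogisticRegression
from skl2onnx import to_onnx

X = np.array([[0., 1.], [1., 1.], [2., 0.], [3., 1.]], dtype=np.float32)
y = np.array([0, 1, 1, 0])
clf = LogisticRegression().fit(X, y)

# the default conversion keeps ZipMap: probabilities come back as seq(map(int64, float))
onx_with_zipmap = to_onnx(clf, X)

# disabling the option replaces the map with a plain probability matrix
onx_without_zipmap = to_onnx(clf, X, options={id(clf): {'zipmap': False}})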

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)tensor(float): The input values

  • +
+

Outputs

+
    +
  • Z (heterogeneous)T: The output map

  • +
+

Type Constraints

+
    +
  • T seq(map(string, float)), seq(map(int64, float)): The output will be a sequence of string or integer maps to float.

  • +
+
+ +
+
+
+
+

OnnxZipMap_1#

+
+
+class skl2onnx.algebra.onnx_ops.OnnxZipMap_1(*args, **kwargs)#
+

Version

+

Onnx name: ZipMap

+

This version of the operator has been available since +version 1 of domain ai.onnx.ml.

+

Summary

+

Creates a map from the input and the attributes.

+

The values are provided by the input tensor, while the keys are specified by the attributes. +Must provide keys in either classlabels_strings or classlabels_int64s (but not both).

+

The columns of the tensor correspond one-by-one to the keys specified by the attributes. There must be as many columns as keys.

+

Attributes

+
    +
  • +
  • +
+

Inputs

+
    +
  • X (heterogeneous)tensor(float): The input values

  • +
+

Outputs

+
    +
  • Z (heterogeneous)T: The output map

  • +
+

Type Constraints

+
    +
  • T seq(map(string, float)), seq(map(int64, float)): The output will be a sequence of string or integer maps to float.

  • +
+
+ +
+
+
+
+
+ +
+
+ +
+ +
+
\ No newline at end of file
diff --git a/tutorial_1-5_external.html b/tutorial_1-5_external.html
index 35d6282f2..5cdbbb6da 100644
--- a/tutorial_1-5_external.html
+++ b/tutorial_1-5_external.html
@@ -1,452 +1,354 @@
Using converters from other libraries — sklearn-onnx 1.11.1 documentation
-
- - - - - - - - -
- - -
- - - -
- -
- -
- - -
- - - - - - -
- -
- -
-

Using converters from other libraries#

-

Before writing our own converter, we can use converters already available in libraries other than sklearn-onnx. onnxmltools implements converters for xgboost and LightGBM. The following examples show how to use those converters when the models are part of a pipeline.
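The sketch below shows what such a registration typically looks like for a LightGBM classifier; it is only an illustration, and the import paths follow the onnxmltools and skl2onnx conventions and may differ between versions.

from lightgbm import LGBMClassifier
from skl2onnx import update_registered_converter
from skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes
from onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm

# tell sklearn-onnx which shape calculator and converter to use
# when it meets a LGBMClassifier inside a pipeline
update_registered_converter(
    LGBMClassifier, 'LightGbmLGBMClassifier',
    calculate_linear_classifier_output_shapes, convert_lightgbm,
    options={'nocl': [True, False], 'zipmap': [True, False]})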

- -
- - -
- - - - - -
- - -
-
- - - -
-
- - - - - -
-
Using converters from other libraries - sklearn-onnx 1.14.0 documentation
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ +
+ +
+ +
+ +
+ +
+ +
+
\ No newline at end of file
diff --git a/tutorial_1_simple.html b/tutorial_1_simple.html
index 84a52e8e3..cf9d0c6b9 100644
--- a/tutorial_1_simple.html
+++ b/tutorial_1_simple.html
@@ -1,463 +1,364 @@
The easy case — sklearn-onnx 1.11.1 documentation
-
- - - - - - - - -
- - -
- - - -
- -
- -
- - -
- - - - - - -
- -
- -
-

The easy case#

-

The easy case is when the machine learned model can be converted into ONNX with a converting library without writing any specific code. That means that a converter exists for the model or for each piece of the model, the converter produces an ONNX graph where every node is part of the existing ONNX specifications, and the runtime used to compute the predictions implements every node used in the ONNX graph.

- -
- - -
- - - - - -
- - -
-
- - - -
-
- - - - - -
-
The easy case - sklearn-onnx 1.14.0 documentation
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ +
+ +
+ +
+
+
+

The easy case#

+

The easy case is when the machine learned model can be converted into ONNX with a converting library without writing any specific code. That means that a converter exists for the model or for each piece of the model, the converter produces an ONNX graph where every node is part of the existing ONNX specifications, and the runtime used to compute the predictions implements every node used in the ONNX graph.
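A minimal sketch of this easy case (the data and the model are made up): one call converts a fully supported model and any runtime implementing the standard ONNX operators can then compute the predictions.

import numpy as np
from sklearn.linear_model import LinearRegression
from skl2onnx import to_onnx

X = np.random.randn(20, 3).astype(np.float32)
y = X @ np.array([1., 2., 3.], dtype=np.float32)
model = LinearRegression().fit(X, y)

onx = to_onnx(model, X)   # every node belongs to the standard ONNX specifications

# predictions with onnxruntime (the default input name is 'X'):
# import onnxruntime as rt
# sess = rt.InferenceSession(onx.SerializeToString())
# print(sess.run(None, {'X': X})[0][:3])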

+ +
+ +
+
+ +
+ +
+
\ No newline at end of file
diff --git a/tutorial_2-5_extlib.html b/tutorial_2-5_extlib.html
index 8f49b0542..c1a0f5d09 100644
--- a/tutorial_2-5_extlib.html
+++ b/tutorial_2-5_extlib.html
@@ -1,451 +1,352 @@
Write converters for other libraries — sklearn-onnx 1.11.1 documentation
-
- - - - - - - - -
- - -
- - - -
- -
- -
- - -
- - - - - - -
- -
- -
-

Write converters for other libraries#

-

sklearn-onnx only converts models from scikit-learn. It implements a mechanism to register converters from other libraries. Converters for models from other libraries will not be added to sklearn-onnx: every library has its own maintenance cycle and it would become difficult to maintain a package with too many dependencies. The following examples were added to show how to develop converters for new libraries.

- -
- - -
- - - - - -
- - -
-
- - - -
-
- - - - - -
-
Write converters for other libraries - sklearn-onnx 1.14.0 documentation
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ +
+ +
+ +
+
+
+

Write converters for other libraries#

+

sklearn-onnx only converts models from scikit-learn. It implements a mechanism to register converters from other libraries. Converters for models from other libraries will not be added to sklearn-onnx: every library has its own maintenance cycle and it would become difficult to maintain a package with too many dependencies. The following examples were added to show how to develop converters for new libraries.

+ +
+ +
+
+ +
+ +
+
\ No newline at end of file
diff --git a/tutorial_2_new_converter.html b/tutorial_2_new_converter.html
index 295fcee6c..142e2fa3d 100644
--- a/tutorial_2_new_converter.html
+++ b/tutorial_2_new_converter.html
@@ -1,493 +1,394 @@
A custom converter for a custom model — sklearn-onnx 1.11.1 documentation
-
- - - - - - - - -
- - -
- - - -
- -
- -
- - -
- - - - - - -
- -
- -
-

A custom converter for a custom model#

-

When sklearn-onnx converts a scikit-learn pipeline, it looks into every transformer and predictor and fetches the associated converter. The resulting ONNX graph combines the outcome of every converter in a single graph. If a model does not have its own converter, sklearn-onnx displays an error message indicating that a converter is missing.

-

<<<

-
import numpy
-from sklearn.linear_model import LogisticRegression
-from skl2onnx import to_onnx
-
-
-class MyLogisticRegression(LogisticRegression):
-    pass
-
-
-X = numpy.array([[0, 0.1]])
-try:
-    to_onnx(MyLogisticRegression(), X)
-except Exception as e:
-    print(e)
-
-
-

>>>

-
    Unable to find a shape calculator for type '<class 'pyquickhelper.sphinxext.sphinx_runpython_extension.run_python_script_2329367799808.<locals>.MyLogisticRegression'>'.
-    It usually means the pipeline being converted contains a
-    transformer or a predictor with no corresponding converter
-    implemented in sklearn-onnx. If the converted is implemented
-    in another library, you need to register
-    the converted so that it can be used by sklearn-onnx (function
-    update_registered_converter). If the model is not yet covered
-    by sklearn-onnx, you may raise an issue to
-    https://github.com/onnx/sklearn-onnx/issues
-    to get the converter implemented or even contribute to the
-    project. If the model is a custom model, a new converter must
-    be implemented. Examples can be found in the gallery.
-
-
-

The following sections show how to create a custom converter. It is assumed this new converter is not meant to be added to this package but only to be registered and used when converting a pipeline. To contribute and add a converter for a scikit-learn model, the logic is still the same; only the converter registration changes. PR 737 can be used as an example.

- -
- - -
- - - - - -
- - -
-
- - - -
-
- - - - - -
-
A custom converter for a custom model - sklearn-onnx 1.14.0 documentation
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ +
+ +
+ +
+
+
+

A custom converter for a custom model#

+

When sklearn-onnx converts a scikit-learn pipeline, it looks into every transformer and predictor and fetches the associated converter. The resulting ONNX graph combines the outcome of every converter in a single graph. If a model does not have its own converter, sklearn-onnx displays an error message indicating that a converter is missing.

+

<<<

+
import numpy
+from sklearn.linear_model import LogisticRegression
+from skl2onnx import to_onnx
+
+
+class MyLogisticRegression(LogisticRegression):
+    pass
+
+
+X = numpy.array([[0, 0.1]])
+try:
+    to_onnx(MyLogisticRegression(), X)
+except Exception as e:
+    print(e)
+
+
+

>>>

+
    Unable to find a shape calculator for type '<class 'pyquickhelper.sphinxext.sphinx_runpython_extension.run_python_script_140552214879104.<locals>.MyLogisticRegression'>'.
+    It usually means the pipeline being converted contains a
+    transformer or a predictor with no corresponding converter
+    implemented in sklearn-onnx. If the converted is implemented
+    in another library, you need to register
+    the converted so that it can be used by sklearn-onnx (function
+    update_registered_converter). If the model is not yet covered
+    by sklearn-onnx, you may raise an issue to
+    https://github.com/onnx/sklearn-onnx/issues
+    to get the converter implemented or even contribute to the
+    project. If the model is a custom model, a new converter must
+    be implemented. Examples can be found in the gallery.
+
+
+

The following sections show how to create a custom converter. It is assumed this new converter is not meant to be added to this package but only to be registered and used when converting a pipeline. To contribute and add a converter for a scikit-learn model, the logic is still the same; only the converter registration changes. PR 737 can be used as an example.
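As a preview of those sections, here is a hedged sketch of the registration step for the MyLogisticRegression class defined above. The shape calculator and the converter below are placeholders that only illustrate the expected signatures; they do not implement a real logistic regression converter.

from skl2onnx import update_registered_converter
from skl2onnx.common.data_types import FloatTensorType


def my_shape_calculator(operator):
    # declare a single float output with as many rows as the input (placeholder)
    n = operator.inputs[0].type.shape[0]
    operator.outputs[0].type = FloatTensorType([n, 1])


def my_converter(scope, operator, container):
    # a real converter adds here the ONNX nodes computing the prediction;
    # this placeholder simply forwards the input with an Identity node
    container.add_node(
        'Identity', operator.inputs[0].full_name, operator.outputs[0].full_name,
        name=scope.get_unique_operator_name('Identity'))


update_registered_converter(
    MyLogisticRegression, 'MyLogisticRegression',
    my_shape_calculator, my_converter)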

+ +
+ +
+
+ +
+ +
+
\ No newline at end of file
diff --git a/tutorial_3_new_operator.html b/tutorial_3_new_operator.html
index cc63fa5ab..721f760be 100644
--- a/tutorial_3_new_operator.html
+++ b/tutorial_3_new_operator.html
@@ -1,458 +1,359 @@
Extend ONNX, extend runtime — sklearn-onnx 1.11.1 documentation
-
- - - - - - - - -
- - -
- - - -
- -
- -
- - -
- - - - - - -
- -
- -
-

Extend ONNX, extend runtime#

-

Existing converters assume it is possible to convert a model with the current list of ONNX operators. This list grows with every version, but it may happen that a new node is needed. It could be added to the ONNX specifications, which requires a new release, but that is not mandatory. New nodes can easily be created by using a different domain. A domain defines a set of operators; there are currently two officially supported domains: ONNX operators and ONNX ML operators. Custom domains can be used. Once this new node is defined, a converter can use it. That leaves the last issue: the runtime must be aware of the implementation attached to this new node. That's the difficult part.

- -
- - -
- - - - - -
- - -
-
- - - -
-
- - - - - -
-
Extend ONNX, extend runtime - sklearn-onnx 1.14.0 documentation
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ +
+ +
+ +
+
+
+

Extend ONNX, extend runtime#

+

Existing converters assume it is possible to convert a model with the current list of ONNX operators. This list grows with every version, but it may happen that a new node is needed. It could be added to the ONNX specifications, which requires a new release, but that is not mandatory. New nodes can easily be created by using a different domain. A domain defines a set of operators; there are currently two officially supported domains: ONNX operators and ONNX ML operators. Custom domains can be used. Once this new node is defined, a converter can use it. That leaves the last issue: the runtime must be aware of the implementation attached to this new node. That's the difficult part.
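A hedged sketch (the domain and node names are made up) of how a node in a custom domain is declared with the onnx helper API; a runtime still has to provide an implementation for that domain before the model can be executed.

from onnx import helper, TensorProto

node = helper.make_node('MySolver', ['X'], ['Y'], domain='my.custom.domain')

graph = helper.make_graph(
    [node], 'custom_domain_example',
    [helper.make_tensor_value_info('X', TensorProto.FLOAT, [None, 2])],
    [helper.make_tensor_value_info('Y', TensorProto.FLOAT, [None, 2])])

model = helper.make_model(
    graph,
    opset_imports=[
        helper.make_opsetid('', 17),                 # standard ONNX operators
        helper.make_opsetid('my.custom.domain', 1),  # the custom domain
    ])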

+ +
+ +
+
+ +
+ +
+
\ No newline at end of file
diff --git a/tutorial_4_advanced.html b/tutorial_4_advanced.html
index 63c8bb87d..e3cfb270c 100644
--- a/tutorial_4_advanced.html
+++ b/tutorial_4_advanced.html
@@ -1,447 +1,349 @@
Advanced scenarios — sklearn-onnx 1.11.1 documentation
-
- - - - - - - - -
- - -
- - - -
- -
- -
- - -
- - - - - - -
- -
- -
-

Advanced scenarios#

-

Unexpected discrepancies may appear. This is a list of examples with issues and resolved issues.

- -
- - -
- - - - - -
- - -
-
- - - -
-
- - - - - -
-
Advanced scenarios - sklearn-onnx 1.14.0 documentation
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ +
+ +
+ +
+ +
+ +
+ +
+
+ + + + \ No newline at end of file